author		Dave Airlie <airlied@redhat.com>	2011-12-20 09:43:53 -0500
committer	Dave Airlie <airlied@redhat.com>	2011-12-20 09:43:53 -0500
commit		1fbe6f625f69e48c4001051dc1431afc704acfaa (patch)
tree		826b741201a2e09a627ed350c6ff36935f5cff79
parent		0cecdd818cd79d092e36e70dfe3a71f2878d6b96 (diff)
parent		384703b8e6cd4c8ef08512e596024e028c91c339 (diff)

Merge tag 'v3.2-rc6' of /home/airlied/devel/kernel/linux-2.6 into drm-core-next
Merge in the upstream tree to bring in the mainline fixes.

Conflicts:
	drivers/gpu/drm/exynos/exynos_drm_fbdev.c
	drivers/gpu/drm/nouveau/nouveau_sgdma.c

-rw-r--r--.mailmap2
-rw-r--r--CREDITS9
-rw-r--r--Documentation/ABI/testing/sysfs-block13
-rw-r--r--Documentation/ABI/testing/sysfs-bus-rbd7
-rw-r--r--Documentation/DocBook/drm.tmpl308
-rw-r--r--Documentation/DocBook/uio-howto.tmpl7
-rw-r--r--Documentation/blockdev/cciss.txt14
-rw-r--r--Documentation/cgroups/freezer-subsystem.txt4
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/filesystems/btrfs.txt4
-rw-r--r--Documentation/i2c/ten-bit-addresses36
-rw-r--r--Documentation/kernel-parameters.txt6
-rw-r--r--Documentation/networking/ip-sysctl.txt12
-rw-r--r--Documentation/power/devices.txt111
-rw-r--r--Documentation/power/runtime_pm.txt40
-rw-r--r--Documentation/serial/serial-rs485.txt14
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt1
-rw-r--r--Documentation/sound/alsa/HD-Audio.txt8
-rw-r--r--Documentation/sound/alsa/soc/machine.txt6
-rw-r--r--Documentation/usb/linux-cdc-acm.inf4
-rw-r--r--Kbuild2
-rw-r--r--MAINTAINERS83
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig23
-rw-r--r--arch/arm/boot/Makefile2
-rw-r--r--arch/arm/boot/dts/tegra-ventana.dts3
-rw-r--r--arch/arm/common/gic.c16
-rw-r--r--arch/arm/common/pl330.c12
-rw-r--r--arch/arm/configs/at91cap9_defconfig (renamed from arch/arm/configs/at91cap9adk_defconfig)7
-rw-r--r--arch/arm/configs/at91rm9200_defconfig47
-rw-r--r--arch/arm/configs/at91sam9260_defconfig (renamed from arch/arm/configs/at91sam9260ek_defconfig)16
-rw-r--r--arch/arm/configs/at91sam9g20_defconfig (renamed from arch/arm/configs/at91sam9g20ek_defconfig)23
-rw-r--r--arch/arm/configs/at91sam9g45_defconfig7
-rw-r--r--arch/arm/configs/at91sam9rl_defconfig (renamed from arch/arm/configs/at91sam9rlek_defconfig)5
-rw-r--r--arch/arm/configs/ezx_defconfig2
-rw-r--r--arch/arm/configs/imote2_defconfig2
-rw-r--r--arch/arm/configs/magician_defconfig2
-rw-r--r--arch/arm/configs/omap1_defconfig6
-rw-r--r--arch/arm/configs/u300_defconfig13
-rw-r--r--arch/arm/configs/u8500_defconfig14
-rw-r--r--arch/arm/configs/zeus_defconfig2
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h2
-rw-r--r--arch/arm/include/asm/mach/arch.h1
-rw-r--r--arch/arm/include/asm/pmu.h10
-rw-r--r--arch/arm/include/asm/topology.h2
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/include/asm/unwind.h16
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/head.S2
-rw-r--r--arch/arm/kernel/kprobes-arm.c4
-rw-r--r--arch/arm/kernel/kprobes-test-arm.c27
-rw-r--r--arch/arm/kernel/kprobes-test-thumb.c16
-rw-r--r--arch/arm/kernel/kprobes-test.h100
-rw-r--r--arch/arm/kernel/machine_kexec.c35
-rw-r--r--arch/arm/kernel/perf_event.c20
-rw-r--r--arch/arm/kernel/pmu.c1
-rw-r--r--arch/arm/kernel/process.c3
-rw-r--r--arch/arm/kernel/setup.c20
-rw-r--r--arch/arm/kernel/topology.c2
-rw-r--r--arch/arm/kernel/unwind.c129
-rw-r--r--arch/arm/lib/bitops.h26
-rw-r--r--arch/arm/lib/changebit.S4
-rw-r--r--arch/arm/lib/clearbit.S4
-rw-r--r--arch/arm/lib/setbit.S4
-rw-r--r--arch/arm/lib/testchangebit.S4
-rw-r--r--arch/arm/lib/testclearbit.S4
-rw-r--r--arch/arm/lib/testsetbit.S4
-rw-r--r--arch/arm/mach-at91/at91cap9_devices.c7
-rw-r--r--arch/arm/mach-at91/at91rm9200_devices.c7
-rw-r--r--arch/arm/mach-at91/at91sam9260.c6
-rw-r--r--arch/arm/mach-at91/at91sam9260_devices.c7
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c7
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c7
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c7
-rw-r--r--arch/arm/mach-at91/at91sam9rl_devices.c7
-rw-r--r--arch/arm/mach-at91/board-yl-9200.c2
-rw-r--r--arch/arm/mach-at91/include/mach/system_rev.h2
-rw-r--r--arch/arm/mach-at91/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-bcmring/core.c2
-rw-r--r--arch/arm/mach-bcmring/dma.c1
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c6
-rw-r--r--arch/arm/mach-davinci/dm646x.c1
-rw-r--r--arch/arm/mach-davinci/include/mach/psc.h5
-rw-r--r--arch/arm/mach-davinci/psc.c18
-rw-r--r--arch/arm/mach-exynos/cpuidle.c2
-rw-r--r--arch/arm/mach-exynos/mct.c13
-rw-r--r--arch/arm/mach-highbank/highbank.c4
-rw-r--r--arch/arm/mach-imx/Kconfig13
-rw-r--r--arch/arm/mach-imx/Makefile.boot34
-rw-r--r--arch/arm/mach-imx/clock-imx6q.c24
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c10
-rw-r--r--arch/arm/mach-imx/mm-imx3.c109
-rw-r--r--arch/arm/mach-imx/src.c7
-rw-r--r--arch/arm/mach-mmp/gplugd.c2
-rw-r--r--arch/arm/mach-mmp/include/mach/gpio-pxa.h2
-rw-r--r--arch/arm/mach-msm/Makefile2
-rw-r--r--arch/arm/mach-msm/board-msm7x30.c4
-rw-r--r--arch/arm/mach-msm/board-msm8960.c4
-rw-r--r--arch/arm/mach-msm/board-msm8x60.c4
-rw-r--r--arch/arm/mach-msm/devices-iommu.c1
-rw-r--r--arch/arm/mach-msm/scm.c3
-rw-r--r--arch/arm/mach-mx5/board-mx51_babbage.c2
-rw-r--r--arch/arm/mach-mx5/board-mx53_evk.c2
-rw-r--r--arch/arm/mach-mx5/board-mx53_loco.c2
-rw-r--r--arch/arm/mach-mx5/board-mx53_smd.c2
-rw-r--r--arch/arm/mach-mx5/clock-mx51-mx53.c6
-rw-r--r--arch/arm/mach-mx5/cpu.c5
-rw-r--r--arch/arm/mach-mx5/imx51-dt.c12
-rw-r--r--arch/arm/mach-mx5/imx53-dt.c12
-rw-r--r--arch/arm/mach-mx5/mm.c6
-rw-r--r--arch/arm/mach-mxs/clock-mx28.c2
-rw-r--r--arch/arm/mach-mxs/include/mach/mx28.h4
-rw-r--r--arch/arm/mach-mxs/include/mach/mxs.h1
-rw-r--r--arch/arm/mach-mxs/mach-m28evk.c2
-rw-r--r--arch/arm/mach-mxs/mach-mx28evk.c4
-rw-r--r--arch/arm/mach-mxs/mach-stmp378x_devb.c2
-rw-r--r--arch/arm/mach-mxs/module-tx28.c4
-rw-r--r--arch/arm/mach-omap1/Kconfig8
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c10
-rw-r--r--arch/arm/mach-omap1/clock.h3
-rw-r--r--arch/arm/mach-omap1/clock_data.c61
-rw-r--r--arch/arm/mach-omap1/devices.c3
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/Makefile5
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c2
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c1
-rw-r--r--arch/arm/mach-omap2/display.c159
-rw-r--r--arch/arm/mach-omap2/display.h29
-rw-r--r--arch/arm/mach-omap2/io.h0
-rw-r--r--arch/arm/mach-omap2/mcbsp.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2420_data.c17
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c17
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c5
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c37
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c24
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.h4
-rw-r--r--arch/arm/mach-omap2/omap_l3_noc.c2
-rw-r--r--arch/arm/mach-omap2/pm.c6
-rw-r--r--arch/arm/mach-omap2/smartreflex.c2
-rw-r--r--arch/arm/mach-omap2/twl-common.c11
-rw-r--r--arch/arm/mach-omap2/twl-common.h3
-rw-r--r--arch/arm/mach-picoxcell/include/mach/debug-macro.S2
-rw-r--r--arch/arm/mach-prima2/pm.c1
-rw-r--r--arch/arm/mach-prima2/prima2.c1
-rw-r--r--arch/arm/mach-pxa/balloon3.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa320.c2
-rw-r--r--arch/arm/mach-pxa/gumstix.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/palm27x.h4
-rw-r--r--arch/arm/mach-pxa/palm27x.c4
-rw-r--r--arch/arm/mach-pxa/palmtc.c2
-rw-r--r--arch/arm/mach-pxa/vpac270.c2
-rw-r--r--arch/arm/mach-s3c64xx/dev-spi.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410-module.c2
-rw-r--r--arch/arm/mach-s3c64xx/s3c6400.c2
-rw-r--r--arch/arm/mach-s3c64xx/setup-fb-24bpp.c2
-rw-r--r--arch/arm/mach-s5pv210/mach-smdkv210.c1
-rw-r--r--arch/arm/mach-sa1100/Makefile.boot4
-rw-r--r--arch/arm/mach-shmobile/Makefile2
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c16
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c2
-rw-r--r--arch/arm/mach-shmobile/board-kota2.c7
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c8
-rw-r--r--arch/arm/mach-shmobile/cpuidle.c52
-rw-r--r--arch/arm/mach-shmobile/include/mach/common.h4
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh73a0.h8
-rw-r--r--arch/arm/mach-shmobile/pfc-sh7367.c122
-rw-r--r--arch/arm/mach-shmobile/pfc-sh7372.c262
-rw-r--r--arch/arm/mach-shmobile/pfc-sh7377.c159
-rw-r--r--arch/arm/mach-shmobile/pfc-sh73a0.c193
-rw-r--r--arch/arm/mach-shmobile/pm-sh7372.c47
-rw-r--r--arch/arm/mach-tegra/board-dt.c13
-rw-r--r--arch/arm/mach-tegra/board-harmony-pinmux.c6
-rw-r--r--arch/arm/mach-tegra/board-paz00-pinmux.c6
-rw-r--r--arch/arm/mach-tegra/board-seaboard-pinmux.c6
-rw-r--r--arch/arm/mach-tegra/board-trimslice-pinmux.c5
-rw-r--r--arch/arm/mach-w90x900/dev.c4
-rw-r--r--arch/arm/mach-w90x900/include/mach/mfp.h3
-rw-r--r--arch/arm/mach-w90x900/include/mach/nuc900_spi.h2
-rw-r--r--arch/arm/mach-w90x900/mfp.c48
-rw-r--r--arch/arm/mm/cache-l2x0.c2
-rw-r--r--arch/arm/mm/dma-mapping.c11
-rw-r--r--arch/arm/mm/mmap.c23
-rw-r--r--arch/arm/plat-mxc/Kconfig4
-rw-r--r--arch/arm/plat-mxc/avic.c1
-rw-r--r--arch/arm/plat-mxc/cpufreq.c1
-rw-r--r--arch/arm/plat-mxc/gic.c11
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/entry-macro.S3
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc.h14
-rw-r--r--arch/arm/plat-mxc/include/mach/system.h7
-rw-r--r--arch/arm/plat-mxc/pwm.c7
-rw-r--r--arch/arm/plat-mxc/system.c3
-rw-r--r--arch/arm/plat-mxc/tzic.c1
-rw-r--r--arch/arm/plat-omap/include/plat/clock.h2
-rw-r--r--arch/arm/plat-omap/include/plat/common.h3
-rw-r--r--arch/arm/plat-s3c24xx/cpu-freq-debugfs.c2
-rw-r--r--arch/arm/plat-s5p/sysmmu.c1
-rw-r--r--arch/arm/plat-samsung/dev-backlight.c1
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg.h2
-rw-r--r--arch/arm/plat-samsung/pd.c2
-rw-r--r--arch/arm/plat-samsung/pwm.c2
-rw-r--r--arch/arm/tools/mach-types1
-rw-r--r--arch/blackfin/include/asm/bfin_serial.h2
-rw-r--r--arch/blackfin/mach-bf518/boards/ezbrd.c14
-rw-r--r--arch/blackfin/mach-bf518/boards/tcm-bf518.c14
-rw-r--r--arch/blackfin/mach-bf527/boards/ad7160eval.c14
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c16
-rw-r--r--arch/blackfin/mach-bf527/boards/ezbrd.c16
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c14
-rw-r--r--arch/blackfin/mach-bf527/boards/tll6527m.c14
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c7
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c7
-rw-r--r--arch/blackfin/mach-bf533/boards/cm_bf533.c7
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c7
-rw-r--r--arch/blackfin/mach-bf533/boards/ip0x.c7
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c7
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537e.c16
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537u.c16
-rw-r--r--arch/blackfin/mach-bf537/boards/dnp5370.c16
-rw-r--r--arch/blackfin/mach-bf537/boards/minotaur.c14
-rw-r--r--arch/blackfin/mach-bf537/boards/pnav10.c15
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c16
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c16
-rw-r--r--arch/blackfin/mach-bf538/boards/ezkit.c21
-rw-r--r--arch/blackfin/mach-bf548/boards/cm_bf548.c28
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c28
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c7
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c7
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c7
-rw-r--r--arch/blackfin/mach-bf561/boards/tepla.c7
-rw-r--r--arch/cris/arch-v10/drivers/Kconfig2
-rw-r--r--arch/cris/arch-v32/drivers/Kconfig2
-rw-r--r--arch/m68k/Kconfig4
-rw-r--r--arch/m68k/Kconfig.bus9
-rw-r--r--arch/m68k/Kconfig.devices31
-rw-r--r--arch/m68k/amiga/amiints.c168
-rw-r--r--arch/m68k/amiga/cia.c39
-rw-r--r--arch/m68k/apollo/dn_ints.c35
-rw-r--r--arch/m68k/atari/ataints.c274
-rw-r--r--arch/m68k/bvme6000/config.c2
-rw-r--r--arch/m68k/hp300/time.c2
-rw-r--r--arch/m68k/include/asm/hardirq.h5
-rw-r--r--arch/m68k/include/asm/irq.h69
-rw-r--r--arch/m68k/include/asm/macintosh.h2
-rw-r--r--arch/m68k/include/asm/q40ints.h3
-rw-r--r--arch/m68k/include/asm/unistd.h4
-rw-r--r--arch/m68k/kernel/Makefile9
-rw-r--r--arch/m68k/kernel/entry_mm.S7
-rw-r--r--arch/m68k/kernel/ints.c323
-rw-r--r--arch/m68k/kernel/syscalltable.S2
-rw-r--r--arch/m68k/mac/baboon.c21
-rw-r--r--arch/m68k/mac/iop.c10
-rw-r--r--arch/m68k/mac/macints.c24
-rw-r--r--arch/m68k/mac/oss.c54
-rw-r--r--arch/m68k/mac/psc.c49
-rw-r--r--arch/m68k/mac/via.c74
-rw-r--r--arch/m68k/mvme147/config.c5
-rw-r--r--arch/m68k/mvme16x/config.c2
-rw-r--r--arch/m68k/q40/q40ints.c60
-rw-r--r--arch/m68k/sun3/sun3ints.c46
-rw-r--r--arch/microblaze/include/asm/namei.h22
-rw-r--r--arch/mips/Makefile4
-rw-r--r--arch/mips/cavium-octeon/flash_setup.c9
-rw-r--r--arch/mips/cavium-octeon/smp.c5
-rw-r--r--arch/mips/emma/common/prom.c2
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/gpio.h18
-rw-r--r--arch/mips/include/asm/unistd.h18
-rw-r--r--arch/mips/kernel/cevt-r4k.c38
-rw-r--r--arch/mips/kernel/cpufreq/loongson2_clock.c1
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c8
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/traps.c3
-rw-r--r--arch/mips/lantiq/clk.c2
-rw-r--r--arch/mips/lantiq/devices.c2
-rw-r--r--arch/mips/lantiq/prom.c2
-rw-r--r--arch/mips/lantiq/setup.c2
-rw-r--r--arch/mips/lantiq/xway/clk-ase.c2
-rw-r--r--arch/mips/lantiq/xway/clk-xway.c2
-rw-r--r--arch/mips/lantiq/xway/devices.c2
-rw-r--r--arch/mips/lantiq/xway/dma.c1
-rw-r--r--arch/mips/lantiq/xway/gpio.c2
-rw-r--r--arch/mips/lantiq/xway/gpio_ebu.c2
-rw-r--r--arch/mips/lantiq/xway/gpio_stp.c2
-rw-r--r--arch/mips/lantiq/xway/prom-ase.c2
-rw-r--r--arch/mips/lantiq/xway/prom-xway.c2
-rw-r--r--arch/mips/lantiq/xway/reset.c2
-rw-r--r--arch/mips/nxp/pnx8550/common/pci.c134
-rw-r--r--arch/mips/nxp/pnx8550/common/setup.c143
-rw-r--r--arch/mips/pci/pci-alchemy.c1
-rw-r--r--arch/mips/pci/pci-lantiq.c1
-rw-r--r--arch/mips/pmc-sierra/yosemite/prom.c2
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/Makefile6
-rw-r--r--arch/powerpc/boot/dts/charon.dts236
-rw-r--r--arch/powerpc/boot/dts/p1023rds.dts17
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig20
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig2
-rw-r--r--arch/powerpc/configs/ppc64_defconfig4
-rw-r--r--arch/powerpc/configs/pseries_defconfig4
-rw-r--r--arch/powerpc/include/asm/atomic.h48
-rw-r--r--arch/powerpc/include/asm/bitops.h12
-rw-r--r--arch/powerpc/include/asm/floppy.h4
-rw-r--r--arch/powerpc/include/asm/futex.h7
-rw-r--r--arch/powerpc/include/asm/kvm.h8
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h2
-rw-r--r--arch/powerpc/include/asm/lv1call.h2
-rw-r--r--arch/powerpc/include/asm/reg_booke.h2
-rw-r--r--arch/powerpc/include/asm/sections.h2
-rw-r--r--arch/powerpc/include/asm/synch.h9
-rw-r--r--arch/powerpc/include/asm/xics.h4
-rw-r--r--arch/powerpc/kernel/entry_32.S15
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S6
-rw-r--r--arch/powerpc/kernel/jump_label.c2
-rw-r--r--arch/powerpc/kernel/kvm.c1
-rw-r--r--arch/powerpc/kernel/misc_32.S2
-rw-r--r--arch/powerpc/kernel/process.c24
-rw-r--r--arch/powerpc/kernel/prom_init.c6
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c1
-rw-r--r--arch/powerpc/kernel/signal_32.c2
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/kernel/traps.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv.c1
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S2
-rw-r--r--arch/powerpc/kvm/book3s_pr.c14
-rw-r--r--arch/powerpc/kvm/powerpc.c1
-rw-r--r--arch/powerpc/lib/feature-fixups.c23
-rw-r--r--arch/powerpc/mm/hugetlbpage.c1
-rw-r--r--arch/powerpc/mm/mem.c30
-rw-r--r--arch/powerpc/mm/numa.c24
-rw-r--r--arch/powerpc/platforms/52xx/mpc5200_simple.c1
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/85xx/p3060_qds.c2
-rw-r--r--arch/powerpc/platforms/Kconfig2
-rw-r--r--arch/powerpc/platforms/cell/beat.c2
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_pciex.c2
-rw-r--r--arch/powerpc/platforms/cell/iommu.c3
-rw-r--r--arch/powerpc/platforms/cell/pmu.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c9
-rw-r--r--arch/powerpc/platforms/powermac/pic.c1
-rw-r--r--arch/powerpc/platforms/powermac/smp.c4
-rw-r--r--arch/powerpc/platforms/ps3/device-init.c2
-rw-r--r--arch/powerpc/platforms/ps3/interrupt.c23
-rw-r--r--arch/powerpc/platforms/ps3/platform.h1
-rw-r--r--arch/powerpc/platforms/ps3/repository.c32
-rw-r--r--arch/powerpc/platforms/ps3/smp.c62
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c1
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c1
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c2
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/powerpc/sysdev/ppc4xx_soc.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c2
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c5
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/crypto/crypt_s390.h7
-rw-r--r--arch/s390/include/asm/kvm_host.h3
-rw-r--r--arch/s390/include/asm/pgtable.h20
-rw-r--r--arch/s390/include/asm/setup.h3
-rw-r--r--arch/s390/include/asm/timex.h2
-rw-r--r--arch/s390/include/asm/unistd.h4
-rw-r--r--arch/s390/kernel/compat_wrapper.S20
-rw-r--r--arch/s390/kernel/early.c2
-rw-r--r--arch/s390/kernel/ptrace.c30
-rw-r--r--arch/s390/kernel/setup.c6
-rw-r--r--arch/s390/kernel/signal.c8
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/s390/kernel/topology.c45
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/kvm/diag.c2
-rw-r--r--arch/s390/kvm/intercept.c3
-rw-r--r--arch/s390/kvm/interrupt.c1
-rw-r--r--arch/s390/kvm/kvm-s390.c12
-rw-r--r--arch/s390/kvm/priv.c10
-rw-r--r--arch/s390/kvm/sigp.c45
-rw-r--r--arch/s390/mm/fault.c9
-rw-r--r--arch/sh/include/asm/page.h5
-rw-r--r--arch/sh/include/asm/unistd_32.h4
-rw-r--r--arch/sh/include/asm/unistd_64.h4
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7203.c16
-rw-r--r--arch/sh/kernel/syscalls_32.S2
-rw-r--r--arch/sh/kernel/syscalls_64.S2
-rw-r--r--arch/sparc/include/asm/pgtable_32.h20
-rw-r--r--arch/sparc/include/asm/pgtable_64.h20
-rw-r--r--arch/sparc/include/asm/unistd.h4
-rw-r--r--arch/sparc/kernel/ds.c6
-rw-r--r--arch/sparc/kernel/entry.h7
-rw-r--r--arch/sparc/kernel/module.c27
-rw-r--r--arch/sparc/kernel/prom_common.c4
-rw-r--r--arch/sparc/kernel/setup_64.c48
-rw-r--r--arch/sparc/kernel/signal32.c18
-rw-r--r--arch/sparc/kernel/signal_32.c30
-rw-r--r--arch/sparc/kernel/signal_64.c42
-rw-r--r--arch/sparc/kernel/sigutil_64.c1
-rw-r--r--arch/sparc/kernel/systbls_32.S2
-rw-r--r--arch/sparc/kernel/systbls_64.S4
-rw-r--r--arch/sparc/mm/Makefile1
-rw-r--r--arch/sparc/mm/btfixup.c3
-rw-r--r--arch/sparc/mm/generic_32.c99
-rw-r--r--arch/sparc/mm/generic_64.c165
-rw-r--r--arch/tile/include/asm/irq.h10
-rw-r--r--arch/tile/kernel/irq.c16
-rw-r--r--arch/tile/kernel/pci-dma.c1
-rw-r--r--arch/tile/kernel/pci.c1
-rw-r--r--arch/tile/kernel/sysfs.c1
-rw-r--r--arch/tile/lib/exports.c3
-rw-r--r--arch/tile/mm/homecache.c9
-rw-r--r--arch/unicore32/Kconfig4
-rw-r--r--arch/unicore32/Kconfig.debug14
-rw-r--r--arch/unicore32/boot/compressed/Makefile4
-rw-r--r--arch/unicore32/include/asm/bitops.h12
-rw-r--r--arch/unicore32/include/asm/processor.h1
-rw-r--r--arch/unicore32/kernel/ksyms.c4
-rw-r--r--arch/unicore32/lib/findbit.S14
-rw-r--r--arch/x86/Kconfig8
-rw-r--r--arch/x86/include/asm/apic.h1
-rw-r--r--arch/x86/include/asm/intel_scu_ipc.h14
-rw-r--r--arch/x86/include/asm/mach_traps.h2
-rw-r--r--arch/x86/include/asm/mce.h5
-rw-r--r--arch/x86/include/asm/mrst.h16
-rw-r--r--arch/x86/include/asm/msr.h9
-rw-r--r--arch/x86/include/asm/system.h1
-rw-r--r--arch/x86/include/asm/timer.h23
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h1
-rw-r--r--arch/x86/include/asm/x86_init.h3
-rw-r--r--arch/x86/kernel/alternative.c2
-rw-r--r--arch/x86/kernel/apic/apic.c33
-rw-r--r--arch/x86/kernel/apic/io_apic.c9
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c8
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-inject.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c25
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event.c16
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_ibs.c29
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c8
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c2
-rw-r--r--arch/x86/kernel/hpet.c21
-rw-r--r--arch/x86/kernel/irq_64.c3
-rw-r--r--arch/x86/kernel/kvmclock.c5
-rw-r--r--arch/x86/kernel/microcode_core.c28
-rw-r--r--arch/x86/kernel/mpparse.c2
-rw-r--r--arch/x86/kernel/nmi.c3
-rw-r--r--arch/x86/kernel/process.c8
-rw-r--r--arch/x86/kernel/quirks.c13
-rw-r--r--arch/x86/kernel/reboot.c21
-rw-r--r--arch/x86/kernel/rtc.c5
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/x86_init.c4
-rw-r--r--arch/x86/kvm/vmx.c131
-rw-r--r--arch/x86/mm/gup.c2
-rw-r--r--arch/x86/mm/highmem_32.c2
-rw-r--r--arch/x86/oprofile/init.c7
-rw-r--r--arch/x86/platform/ce4100/ce4100.c2
-rw-r--r--arch/x86/platform/efi/efi_32.c48
-rw-r--r--arch/x86/platform/mrst/mrst.c109
-rw-r--r--arch/x86/platform/mrst/vrtc.c4
-rw-r--r--arch/x86/um/asm/processor.h2
-rw-r--r--arch/x86/xen/enlighten.c3
-rw-r--r--arch/x86/xen/grant-table.c2
-rw-r--r--arch/x86/xen/setup.c20
-rw-r--r--block/blk-core.c41
-rw-r--r--block/blk-map.c7
-rw-r--r--block/cfq-iosched.c16
-rw-r--r--block/genhd.c71
-rw-r--r--crypto/ablkcipher.c14
-rw-r--r--crypto/aead.c14
-rw-r--r--crypto/ahash.c7
-rw-r--r--crypto/blkcipher.c7
-rw-r--r--crypto/crypto_user.c3
-rw-r--r--crypto/pcompress.c7
-rw-r--r--crypto/rng.c7
-rw-r--r--crypto/shash.c7
-rw-r--r--drivers/acpi/apei/erst.c31
-rw-r--r--drivers/acpi/processor_idle.c29
-rw-r--r--drivers/ata/ahci.c3
-rw-r--r--drivers/ata/ahci_platform.c6
-rw-r--r--drivers/ata/libata-eh.c12
-rw-r--r--drivers/ata/libata-pmp.c7
-rw-r--r--drivers/ata/libata-scsi.c4
-rw-r--r--drivers/ata/libata-sff.c4
-rw-r--r--drivers/ata/pata_of_platform.c2
-rw-r--r--drivers/ata/sata_sis.c2
-rw-r--r--drivers/base/core.c5
-rw-r--r--drivers/base/node.c14
-rw-r--r--drivers/base/power/clock_ops.c3
-rw-r--r--drivers/base/power/main.c3
-rw-r--r--drivers/base/power/opp.c2
-rw-r--r--drivers/base/power/qos.c18
-rw-r--r--drivers/block/cciss.c12
-rw-r--r--drivers/block/cciss_scsi.c1
-rw-r--r--drivers/block/loop.c51
-rw-r--r--drivers/block/paride/pg.c1
-rw-r--r--drivers/block/rbd.c101
-rw-r--r--drivers/block/swim3.c362
-rw-r--r--drivers/bluetooth/Kconfig6
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c15
-rw-r--r--drivers/bluetooth/btusb.c6
-rw-r--r--drivers/char/agp/intel-gtt.c7
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/cpufreq/db8500-cpufreq.c6
-rw-r--r--drivers/crypto/mv_cesa.c12
-rw-r--r--drivers/devfreq/Kconfig41
-rw-r--r--drivers/devfreq/devfreq.c10
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/firmware/dmi_scan.c6
-rw-r--r--drivers/firmware/efivars.c12
-rw-r--r--drivers/firmware/iscsi_ibft.c42
-rw-r--r--drivers/firmware/iscsi_ibft_find.c26
-rw-r--r--drivers/firmware/sigma.c81
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-da9052.c21
-rw-r--r--drivers/gpio/gpio-ml-ioh.c32
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c18
-rw-r--r--drivers/gpio/gpio-omap.c59
-rw-r--r--drivers/gpio/gpio-pca953x.c15
-rw-r--r--drivers/gpio/gpio-pl061.c4
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/drm_crtc.c8
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c40
-rw-r--r--drivers/gpu/drm/drm_debugfs.c12
-rw-r--r--drivers/gpu/drm/drm_drv.c2
-rw-r--r--drivers/gpu/drm/drm_irq.c31
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c62
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h21
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c78
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c76
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h25
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c83
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c66
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c44
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c71
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c89
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h28
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c63
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c10
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c53
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h37
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c19
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h51
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c122
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c546
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c19
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c41
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c60
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c195
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h29
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h31
-rw-r--r--drivers/gpu/drm/radeon/r100.c7
-rw-r--r--drivers/gpu/drm/radeon/r300.c94
-rw-r--r--drivers/gpu/drm/radeon/r600.c118
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon.h53
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c304
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c18
-rw-r--r--drivers/gpu/drm/radeon/rs600.c7
-rw-r--r--drivers/gpu/drm/radeon/rv770.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c25
-rw-r--r--drivers/gpu/vga/vgaarb.c62
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/ad7314.c1
-rw-r--r--drivers/hwmon/ads7871.c1
-rw-r--r--drivers/hwmon/exynos4_tmu.c12
-rw-r--r--drivers/hwmon/gpio-fan.c13
-rw-r--r--drivers/hwmon/jz4740-hwmon.c16
-rw-r--r--drivers/hwmon/ntc_thermistor.c14
-rw-r--r--drivers/hwmon/s3c-hwmon.c13
-rw-r--r--drivers/hwmon/sch5627.c13
-rw-r--r--drivers/hwmon/sch5636.c13
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c14
-rw-r--r--drivers/hwmon/ultra45_env.c13
-rw-r--r--drivers/hwmon/wm831x-hwmon.c12
-rw-r--r--drivers/hwmon/wm8350-hwmon.c12
-rw-r--r--drivers/hwspinlock/u8500_hsem.c7
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c4
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c2
-rw-r--r--drivers/i2c/i2c-core.c4
-rw-r--r--drivers/i2c/i2c-dev.c2
-rw-r--r--drivers/ide/cy82c693.c6
-rw-r--r--drivers/ide/icside.c2
-rw-r--r--drivers/ide/ide-cd.c1
-rw-r--r--drivers/ide/ide-floppy.c1
-rw-r--r--drivers/ide/ide-tape.c1
-rw-r--r--drivers/ide/piix.c18
-rw-r--r--drivers/ide/triflex.c16
-rw-r--r--drivers/infiniband/core/addr.c9
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c13
-rw-r--r--drivers/input/mouse/elantech.c26
-rw-r--r--drivers/input/serio/ams_delta_serio.c1
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h14
-rw-r--r--drivers/iommu/intel-iommu.c7
-rw-r--r--drivers/iommu/intr_remapping.c2
-rw-r--r--drivers/iommu/omap-iommu-debug.c1
-rw-r--r--drivers/iommu/omap-iovmm.c1
-rw-r--r--drivers/isdn/divert/divert_procfs.c6
-rw-r--r--drivers/isdn/i4l/isdn_net.c3
-rw-r--r--drivers/leds/led-class.c5
-rw-r--r--drivers/macintosh/via-macii.c2
-rw-r--r--drivers/macintosh/via-maciisi.c4
-rw-r--r--drivers/md/bitmap.c4
-rw-r--r--drivers/md/md.c27
-rw-r--r--drivers/md/raid5.c24
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-i2c.c3
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-phy.c7
-rw-r--r--drivers/media/video/s5k6aa.c1
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c4
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c4
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c6
-rw-r--r--drivers/media/video/v4l2-ctrls.c5
-rw-r--r--drivers/media/video/v4l2-event.c10
-rw-r--r--drivers/media/video/videobuf2-core.c6
-rw-r--r--drivers/mfd/ab5500-core.c1
-rw-r--r--drivers/mfd/ab5500-debugfs.c1
-rw-r--r--drivers/misc/Kconfig15
-rw-r--r--drivers/misc/ad525x_dpot.h2
-rw-r--r--drivers/misc/carma/carma-fpga-program.c9
-rw-r--r--drivers/misc/carma/carma-fpga.c9
-rw-r--r--drivers/misc/eeprom/Kconfig2
-rw-r--r--drivers/misc/pch_phub.c81
-rw-r--r--drivers/misc/spear13xx_pcie_gadget.c2
-rw-r--r--drivers/mmc/card/block.c8
-rw-r--r--drivers/mmc/core/core.c98
-rw-r--r--drivers/mmc/core/mmc.c12
-rw-r--r--drivers/mmc/host/mxcmmc.c1
-rw-r--r--drivers/mmc/host/omap_hsmmc.c7
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c1
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c8
-rw-r--r--drivers/mmc/host/sdhci-s3c.c2
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c2
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c1
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/arcnet/Kconfig2
-rw-r--r--drivers/net/bonding/bond_main.c33
-rw-r--r--drivers/net/bonding/bond_sysfs.c7
-rw-r--r--drivers/net/can/sja1000/peak_pci.c1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c39
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c50
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c15
-rw-r--r--drivers/net/ethernet/cadence/Kconfig1
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/fec.c11
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c53
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea.h4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c6
-rw-r--r--drivers/net/ethernet/ibm/iseries_veth.c2
-rw-r--r--drivers/net/ethernet/jme.c113
-rw-r--r--drivers/net/ethernet/jme.h19
-rw-r--r--drivers/net/ethernet/lantiq_etop.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c106
-rw-r--r--drivers/net/ethernet/marvell/sky2.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c36
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c2
-rw-r--r--drivers/net/ethernet/pasemi/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h8
-rw-r--r--drivers/net/ethernet/rdc/r6040.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c55
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c131
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/tile/tilepro.c8
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c7
-rw-r--r--drivers/net/hippi/Kconfig2
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/ppp/pptp.c4
-rw-r--r--drivers/net/usb/asix.c68
-rw-r--r--drivers/net/usb/cdc_ether.c2
-rw-r--r--drivers/net/usb/lg-vl600.c25
-rw-r--r--drivers/net/usb/smsc75xx.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/regd.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c15
-rw-r--r--drivers/net/wireless/b43/xmit.h16
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c36
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c34
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-shared.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c33
-rw-r--r--drivers/net/wireless/libertas/cfg.c2
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/mwifiex/scan.c9
-rw-r--r--drivers/net/wireless/p54/p54spi.c5
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c22
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c17
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c2
-rw-r--r--drivers/net/wireless/wl12xx/scan.c2
-rw-r--r--drivers/net/xen-netback/netback.c4
-rw-r--r--drivers/of/irq.c29
-rw-r--r--drivers/oprofile/oprof.c29
-rw-r--r--drivers/oprofile/timer_int.c1
-rw-r--r--drivers/pci/Kconfig1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c29
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c3
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c27
-rw-r--r--drivers/pci/hotplug/shpchp_core.c4
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c4
-rw-r--r--drivers/pinctrl/Kconfig22
-rw-r--r--drivers/platform/x86/Kconfig4
-rw-r--r--drivers/platform/x86/dell-laptop.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c21
-rw-r--r--drivers/power/intel_mid_battery.c12
-rw-r--r--drivers/ps3/ps3-vuart.c2
-rw-r--r--drivers/ps3/ps3stor_lib.c2
-rw-r--r--drivers/ptp/ptp_clock.c4
-rw-r--r--drivers/rapidio/devices/tsi721.c41
-rw-r--r--drivers/rapidio/devices/tsi721.h2
-rw-r--r--drivers/regulator/aat2870-regulator.c2
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/tps65910-regulator.c14
-rw-r--r--drivers/regulator/twl-regulator.c46
-rw-r--r--drivers/rtc/class.c10
-rw-r--r--drivers/rtc/interface.c44
-rw-r--r--drivers/rtc/rtc-mrst.c19
-rw-r--r--drivers/rtc/rtc-puv3.c4
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/cio/chsc.c7
-rw-r--r--drivers/s390/cio/cio.h5
-rw-r--r--drivers/s390/cio/css.c104
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/device_fsm.c30
-rw-r--r--drivers/s390/cio/device_ops.c20
-rw-r--r--drivers/s390/cio/io_sch.h5
-rw-r--r--drivers/s390/crypto/ap_bus.c25
-rw-r--r--drivers/s390/net/Kconfig2
-rw-r--r--drivers/s390/net/lcs.c6
-rw-r--r--drivers/s390/net/netiucv.c2
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c1
-rw-r--r--drivers/s390/net/qeth_l3_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_sys.c4
-rw-r--r--drivers/sbus/char/bbc_i2c.c27
-rw-r--r--drivers/sbus/char/display7seg.c13
-rw-r--r--drivers/sbus/char/envctrl.c12
-rw-r--r--drivers/sbus/char/flash.c12
-rw-r--r--drivers/sbus/char/uctrl.c12
-rw-r--r--drivers/scsi/aacraid/linit.c4
-rw-r--r--drivers/scsi/hpsa.c5
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c5
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/scsi_scan.c6
-rw-r--r--drivers/sh/Makefile8
-rw-r--r--drivers/sh/clk/core.c107
-rw-r--r--drivers/sh/pm_runtime.c (renamed from arch/arm/mach-shmobile/pm_runtime.c)4
-rw-r--r--drivers/spi/Kconfig4
-rw-r--r--drivers/spi/spi-ath79.c1
-rw-r--r--drivers/spi/spi-atmel.c5
-rw-r--r--drivers/spi/spi-gpio.c4
-rw-r--r--drivers/spi/spi-nuc900.c3
-rw-r--r--drivers/spi/spi-pl022.c8
-rw-r--r--drivers/ssb/driver_pcicore.c8
-rw-r--r--drivers/staging/comedi/comedi_fops.c96
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c7
-rw-r--r--drivers/staging/et131x/Kconfig3
-rw-r--r--drivers/staging/et131x/et131x.c12
-rw-r--r--drivers/staging/iio/industrialio-core.c25
-rw-r--r--drivers/staging/media/as102/as102_drv.c4
-rw-r--r--drivers/staging/media/as102/as102_drv.h3
-rw-r--r--drivers/staging/octeon/ethernet-tx.c2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c1
-rw-r--r--drivers/staging/rts_pstor/rtsx.c1
-rw-r--r--drivers/staging/slicoss/Kconfig2
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c15
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c4
-rw-r--r--drivers/staging/usbip/vhci_rx.c10
-rw-r--r--drivers/target/iscsi/iscsi_target.c26
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c3
-rw-r--r--drivers/target/loopback/tcm_loop.c41
-rw-r--r--drivers/target/target_core_alua.c27
-rw-r--r--drivers/target/target_core_cdb.c20
-rw-r--r--drivers/target/target_core_configfs.c11
-rw-r--r--drivers/target/target_core_device.c30
-rw-r--r--drivers/target/target_core_file.c20
-rw-r--r--drivers/target/target_core_iblock.c16
-rw-r--r--drivers/target/target_core_pr.c240
-rw-r--r--drivers/target/target_core_pscsi.c28
-rw-r--r--drivers/target/target_core_rd.c258
-rw-r--r--drivers/target/target_core_tmr.c4
-rw-r--r--drivers/target/target_core_transport.c260
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c2
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c3
-rw-r--r--drivers/tty/hvc/hvc_dcc.c2
-rw-r--r--drivers/tty/n_gsm.c12
-rw-r--r--drivers/tty/serial/Kconfig14
-rw-r--r--drivers/tty/serial/atmel_serial.c16
-rw-r--r--drivers/tty/serial/crisv10.c10
-rw-r--r--drivers/tty/serial/mfd.c4
-rw-r--r--drivers/tty/serial/pch_uart.c19
-rw-r--r--drivers/tty/serial/sh-sci.c19
-rw-r--r--drivers/tty/tty_ldisc.c30
-rw-r--r--drivers/usb/class/cdc-acm.c18
-rw-r--r--drivers/usb/core/hub.c6
-rw-r--r--drivers/usb/core/quirks.c27
-rw-r--r--drivers/usb/dwc3/gadget.c1
-rw-r--r--drivers/usb/gadget/Kconfig9
-rw-r--r--drivers/usb/gadget/amd5536udc.c2
-rw-r--r--drivers/usb/gadget/ci13xxx_msm.c2
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c21
-rw-r--r--drivers/usb/gadget/f_mass_storage.c7
-rw-r--r--drivers/usb/gadget/f_midi.c138
-rw-r--r--drivers/usb/gadget/f_phonet.c2
-rw-r--r--drivers/usb/gadget/f_serial.c4
-rw-r--r--drivers/usb/gadget/file_storage.c4
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c3
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c3
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c80
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h10
-rw-r--r--drivers/usb/gadget/inode.c5
-rw-r--r--drivers/usb/gadget/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/net2280.c2
-rw-r--r--drivers/usb/gadget/pch_udc.c10
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c32
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c4
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c3
-rw-r--r--drivers/usb/gadget/udc-core.c10
-rw-r--r--drivers/usb/host/ehci-sched.c22
-rw-r--r--drivers/usb/host/ehci-xls.c2
-rw-r--r--drivers/usb/host/ohci-at91.c6
-rw-r--r--drivers/usb/host/ohci-hcd.c15
-rw-r--r--drivers/usb/host/ohci-pci.c26
-rw-r--r--drivers/usb/host/ohci.h1
-rw-r--r--drivers/usb/host/pci-quirks.c57
-rw-r--r--drivers/usb/host/whci/qset.c2
-rw-r--r--drivers/usb/host/xhci-mem.c5
-rw-r--r--drivers/usb/host/xhci-ring.c13
-rw-r--r--drivers/usb/host/xhci.c39
-rw-r--r--drivers/usb/musb/Kconfig3
-rw-r--r--drivers/usb/musb/am35x.c1
-rw-r--r--drivers/usb/musb/da8xx.c1
-rw-r--r--drivers/usb/musb/musb_core.c9
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/renesas_usbhs/common.c2
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod.c2
-rw-r--r--drivers/usb/renesas_usbhs/mod.h8
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c51
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c64
-rw-r--r--drivers/usb/serial/ark3116.c10
-rw-r--r--drivers/usb/serial/ftdi_sio.c15
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/option.c37
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/storage/ene_ub6250.c3
-rw-r--r--drivers/usb/storage/protocol.c7
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/video/da8xx-fb.c15
-rw-r--r--drivers/video/omap/dispc.c1
-rw-r--r--drivers/video/omap2/dss/dispc.c11
-rw-r--r--drivers/video/omap2/dss/hdmi.c2
-rw-r--r--drivers/video/via/share.h4
-rw-r--r--drivers/virtio/Kconfig2
-rw-r--r--drivers/virtio/virtio_mmio.c2
-rw-r--r--drivers/virtio/virtio_pci.c29
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/adx_wdt.c355
-rw-r--r--drivers/watchdog/s3c2410_wdt.c4
-rw-r--r--drivers/watchdog/wm831x_wdt.c2
-rw-r--r--drivers/xen/balloon.c4
-rw-r--r--drivers/xen/gntalloc.c4
-rw-r--r--drivers/xen/gntdev.c10
-rw-r--r--drivers/xen/swiotlb-xen.c4
-rw-r--r--drivers/xen/xenbus/xenbus_client.c11
-rw-r--r--fs/bio.c7
-rw-r--r--fs/btrfs/async-thread.c117
-rw-r--r--fs/btrfs/async-thread.h4
-rw-r--r--fs/btrfs/backref.c2
-rw-r--r--fs/btrfs/btrfs_inode.h4
-rw-r--r--fs/btrfs/ctree.c17
-rw-r--r--fs/btrfs/ctree.h11
-rw-r--r--fs/btrfs/delayed-inode.c62
-rw-r--r--fs/btrfs/disk-io.c223
-rw-r--r--fs/btrfs/extent-tree.c336
-rw-r--r--fs/btrfs/extent_io.c60
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/file.c8
-rw-r--r--fs/btrfs/free-space-cache.c82
-rw-r--r--fs/btrfs/inode-map.c28
-rw-r--r--fs/btrfs/inode.c272
-rw-r--r--fs/btrfs/ioctl.c23
-rw-r--r--fs/btrfs/relocation.c4
-rw-r--r--fs/btrfs/scrub.c79
-rw-r--r--fs/btrfs/super.c125
-rw-r--r--fs/btrfs/transaction.c12
-rw-r--r--fs/btrfs/volumes.c15
-rw-r--r--fs/btrfs/volumes.h6
-rw-r--r--fs/ceph/addr.c8
-rw-r--r--fs/ceph/caps.c187
-rw-r--r--fs/ceph/dir.c26
-rw-r--r--fs/ceph/file.c23
-rw-r--r--fs/ceph/inode.c62
-rw-r--r--fs/ceph/ioctl.c4
-rw-r--r--fs/ceph/mds_client.c33
-rw-r--r--fs/ceph/mds_client.h2
-rw-r--r--fs/ceph/snap.c16
-rw-r--r--fs/ceph/super.c8
-rw-r--r--fs/ceph/super.h31
-rw-r--r--fs/ceph/xattr.c42
-rw-r--r--fs/cifs/connect.c2
-rw-r--r--fs/cifs/file.c26
-rw-r--r--fs/cifs/readdir.c10
-rw-r--r--fs/cifs/smbencrypt.c6
-rw-r--r--fs/configfs/inode.c2
-rw-r--r--fs/configfs/mount.c36
-rw-r--r--fs/dcache.c82
-rw-r--r--fs/ecryptfs/crypto.c26
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h5
-rw-r--r--fs/ecryptfs/file.c23
-rw-r--r--fs/ecryptfs/inode.c52
-rw-r--r--fs/ext4/balloc.c2
-rw-r--r--fs/ext4/extents.c3
-rw-r--r--fs/ext4/inode.c55
-rw-r--r--fs/ext4/page-io.c12
-rw-r--r--fs/ext4/super.c23
-rw-r--r--fs/fs-writeback.c5
-rw-r--r--fs/fuse/dev.c3
-rw-r--r--fs/fuse/file.c6
-rw-r--r--fs/fuse/inode.c24
-rw-r--r--fs/hfs/trans.c2
-rw-r--r--fs/minix/bitmap.c55
-rw-r--r--fs/minix/inode.c25
-rw-r--r--fs/minix/minix.h11
-rw-r--r--fs/namespace.c52
-rw-r--r--fs/ncpfs/inode.c8
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/file.c91
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h2
-rw-r--r--fs/nfs/nfs3proc.c1
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfs/pnfs.c26
-rw-r--r--fs/nfs/proc.c1
-rw-r--r--fs/nfs/read.c14
-rw-r--r--fs/nfs/super.c37
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/aops.c69
-rw-r--r--fs/ocfs2/aops.h14
-rw-r--r--fs/ocfs2/cluster/heartbeat.c194
-rw-r--r--fs/ocfs2/cluster/netdebug.c102
-rw-r--r--fs/ocfs2/cluster/tcp.c138
-rw-r--r--fs/ocfs2/cluster/tcp.h2
-rw-r--r--fs/ocfs2/dir.c3
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h56
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c44
-rw-r--r--fs/ocfs2/dlm/dlmlock.c54
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c175
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c164
-rw-r--r--fs/ocfs2/dlm/dlmthread.c16
-rw-r--r--fs/ocfs2/dlmglue.c21
-rw-r--r--fs/ocfs2/extent_map.c96
-rw-r--r--fs/ocfs2/extent_map.h2
-rw-r--r--fs/ocfs2/file.c96
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/ocfs2/inode.h3
-rw-r--r--fs/ocfs2/ioctl.c11
-rw-r--r--fs/ocfs2/journal.c23
-rw-r--r--fs/ocfs2/journal.h5
-rw-r--r--fs/ocfs2/mmap.c53
-rw-r--r--fs/ocfs2/move_extents.c2
-rw-r--r--fs/ocfs2/ocfs2.h51
-rw-r--r--fs/ocfs2/quota_local.c23
-rw-r--r--fs/ocfs2/slot_map.c4
-rw-r--r--fs/ocfs2/stack_o2cb.c71
-rw-r--r--fs/ocfs2/super.c25
-rw-r--r--fs/ocfs2/xattr.c10
-rw-r--r--fs/proc/base.c146
-rw-r--r--fs/proc/meminfo.c7
-rw-r--r--fs/proc/root.c8
-rw-r--r--fs/proc/stat.c4
-rw-r--r--fs/pstore/platform.c13
-rw-r--r--fs/seq_file.c6
-rw-r--r--fs/ubifs/super.c18
-rw-r--r--fs/xfs/xfs_acl.c2
-rw-r--r--fs/xfs/xfs_aops.c2
-rw-r--r--fs/xfs/xfs_attr_leaf.c64
-rw-r--r--fs/xfs/xfs_bmap.c20
-rw-r--r--fs/xfs/xfs_buf_item.c2
-rw-r--r--fs/xfs/xfs_dquot_item.c6
-rw-r--r--fs/xfs/xfs_export.c8
-rw-r--r--fs/xfs/xfs_extfree_item.c4
-rw-r--r--fs/xfs/xfs_inode.c21
-rw-r--r--fs/xfs/xfs_inode.h1
-rw-r--r--fs/xfs/xfs_inode_item.c2
-rw-r--r--fs/xfs/xfs_log.c350
-rw-r--r--fs/xfs/xfs_log.h2
-rw-r--r--fs/xfs/xfs_qm.c3
-rw-r--r--fs/xfs/xfs_sync.c11
-rw-r--r--fs/xfs/xfs_trace.h12
-rw-r--r--fs/xfs/xfs_trans.h6
-rw-r--r--fs/xfs/xfs_vnodeops.c14
-rw-r--r--include/asm-generic/unistd.h8
-rw-r--r--include/drm/drmP.h4
-rw-r--r--include/drm/drm_mode.h2
-rw-r--r--include/drm/drm_pciids.h20
-rw-r--r--include/drm/exynos_drm.h7
-rw-r--r--include/drm/radeon_drm.h4
-rw-r--r--include/linux/bio.h10
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/ceph/osd_client.h8
-rw-r--r--include/linux/clocksource.h3
-rw-r--r--include/linux/compat.h9
-rw-r--r--include/linux/dcache.h3
-rw-r--r--include/linux/devfreq.h2
-rw-r--r--include/linux/device.h7
-rw-r--r--include/linux/dma_remapping.h2
-rw-r--r--include/linux/fs.h4
-rw-r--r--include/linux/ftrace_event.h2
-rw-r--r--include/linux/genhd.h4
-rw-r--r--include/linux/hugetlb.h5
-rw-r--r--include/linux/hwspinlock.h1
-rw-r--r--include/linux/i2c.h3
-rw-r--r--include/linux/inet_diag.h3
-rw-r--r--include/linux/init_task.h5
-rw-r--r--include/linux/kvm.h1
-rw-r--r--include/linux/log2.h1
-rw-r--r--include/linux/mfd/tps65910.h3
-rw-r--r--include/linux/mfd/wm8994/registers.h15
-rw-r--r--include/linux/mm.h1
-rw-r--r--include/linux/mmc/card.h6
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/nfs_fs.h3
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/pci-ats.h6
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pci_ids.h6
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/pinctrl/pinctrl.h1
-rw-r--r--include/linux/pkt_sched.h6
-rw-r--r--include/linux/pm.h231
-rw-r--r--include/linux/pm_runtime.h6
-rw-r--r--include/linux/pstore.h4
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/serial.h14
-rw-r--r--include/linux/serial_sci.h1
-rw-r--r--include/linux/sh_clk.h4
-rw-r--r--include/linux/sh_pfc.h76
-rw-r--r--include/linux/shrinker.h2
-rw-r--r--include/linux/sigma.h13
-rw-r--r--include/linux/virtio_config.h2
-rw-r--r--include/linux/virtio_mmio.h2
-rw-r--r--include/linux/vmalloc.h2
-rw-r--r--include/net/bluetooth/l2cap.h7
-rw-r--r--include/net/cfg80211.h4
-rw-r--r--include/net/dst.h7
-rw-r--r--include/net/dst_ops.h2
-rw-r--r--include/net/inet_sock.h2
-rw-r--r--include/net/inetpeer.h1
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h19
-rw-r--r--include/net/netns/conntrack.h2
-rw-r--r--include/net/red.h15
-rw-r--r--include/net/route.h4
-rw-r--r--include/target/target_core_base.h46
-rw-r--r--include/target/target_core_transport.h24
-rw-r--r--include/video/omapdss.h7
-rw-r--r--include/xen/platform_pci.h6
-rw-r--r--ipc/mqueue.c8
-rw-r--r--ipc/msgutil.c5
-rw-r--r--kernel/cgroup_freezer.c11
-rw-r--r--kernel/events/core.c91
-rw-r--r--kernel/events/internal.h3
-rw-r--r--kernel/events/ring_buffer.c3
-rw-r--r--kernel/fork.c5
-rw-r--r--kernel/hrtimer.c6
-rw-r--r--kernel/irq/manage.c7
-rw-r--r--kernel/irq/spurious.c6
-rw-r--r--kernel/jump_label.c3
-rw-r--r--kernel/lockdep.c8
-rw-r--r--kernel/power/hibernate.c37
-rw-r--r--kernel/power/main.c3
-rw-r--r--kernel/power/qos.c1
-rw-r--r--kernel/printk.c3
-rw-r--r--kernel/sched.c17
-rw-r--r--kernel/sched_fair.c159
-rw-r--r--kernel/sched_features.h1
-rw-r--r--kernel/sched_rt.c3
-rw-r--r--kernel/time/alarmtimer.c2
-rw-r--r--kernel/time/clockevents.c1
-rw-r--r--kernel/time/clocksource.c62
-rw-r--r--kernel/time/tick-broadcast.c2
-rw-r--r--kernel/time/timekeeping.c92
-rw-r--r--kernel/timer.c2
-rw-r--r--kernel/trace/ftrace.c5
-rw-r--r--kernel/trace/trace_events.c1
-rw-r--r--kernel/trace/trace_events_filter.c13
-rw-r--r--lib/dma-debug.c2
-rw-r--r--mm/backing-dev.c8
-rw-r--r--mm/filemap.c6
-rw-r--r--mm/huge_memory.c16
-rw-r--r--mm/hugetlb.c3
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/oom_kill.c5
-rw-r--r--mm/page-writeback.c55
-rw-r--r--mm/page_alloc.c10
-rw-r--r--mm/percpu-vm.c17
-rw-r--r--mm/percpu.c62
-rw-r--r--mm/slab.c5
-rw-r--r--mm/slub.c42
-rw-r--r--mm/vmalloc.c29
-rw-r--r--mm/vmscan.c26
-rw-r--r--net/batman-adv/translation-table.c27
-rw-r--r--net/bluetooth/bnep/core.c8
-rw-r--r--net/bluetooth/cmtp/core.c5
-rw-r--r--net/bluetooth/hci_conn.c2
-rw-r--r--net/bluetooth/hci_event.c2
-rw-r--r--net/bluetooth/l2cap_core.c16
-rw-r--r--net/bridge/br_multicast.c6
-rw-r--r--net/bridge/br_netlink.c6
-rw-r--r--net/bridge/br_stp.c29
-rw-r--r--net/caif/cffrml.c11
-rw-r--r--net/ceph/crush/mapper.c35
-rw-r--r--net/ceph/osd_client.c2
-rw-r--r--net/core/dev.c9
-rw-r--r--net/core/dev_addr_lists.c3
-rw-r--r--net/core/neighbour.c5
-rw-r--r--net/core/request_sock.c7
-rw-r--r--net/core/secure_seq.c2
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/dccp/ipv4.c1
-rw-r--r--net/decnet/dn_route.c10
-rw-r--r--net/decnet/dn_timer.c17
-rw-r--r--net/ipv4/ah4.c8
-rw-r--r--net/ipv4/devinet.c5
-rw-r--r--net/ipv4/igmp.c3
-rw-r--r--net/ipv4/inet_diag.c14
-rw-r--r--net/ipv4/ip_forward.c2
-rw-r--r--net/ipv4/ip_options.c4
-rw-r--r--net/ipv4/ipip.c7
-rw-r--r--net/ipv4/netfilter.c3
-rw-r--r--net/ipv4/netfilter/Kconfig1
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/route.c196
-rw-r--r--net/ipv4/tcp_ipv4.c1
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv4/udp.c15
-rw-r--r--net/ipv6/addrconf.c3
-rw-r--r--net/ipv6/ah6.c8
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/ip6_input.c8
-rw-r--r--net/ipv6/ip6_tunnel.c8
-rw-r--r--net/ipv6/ipv6_sockglue.c2
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/netfilter/Kconfig1
-rw-r--r--net/ipv6/route.c23
-rw-r--r--net/ipv6/sit.c7
-rw-r--r--net/ipv6/tcp_ipv6.c13
-rw-r--r--net/ipv6/udp.c15
-rw-r--r--net/l2tp/l2tp_core.c10
-rw-r--r--net/mac80211/agg-tx.c128
-rw-r--r--net/mac80211/debugfs_sta.c4
-rw-r--r--net/mac80211/main.c6
-rw-r--r--net/mac80211/mlme.c21
-rw-r--r--net/mac80211/rx.c9
-rw-r--r--net/mac80211/sta_info.c8
-rw-r--r--net/mac80211/status.c8
-rw-r--r--net/mac80211/util.c5
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c2
-rw-r--r--net/netfilter/nf_conntrack_ecache.c37
-rw-r--r--net/netfilter/nf_conntrack_netlink.c73
-rw-r--r--net/netlabel/netlabel_kapi.c26
-rw-r--r--net/rds/Kconfig1
-rw-r--r--net/sched/sch_gred.c2
-rw-r--r--net/sched/sch_red.c4
-rw-r--r--net/sched/sch_teql.c31
-rw-r--r--net/sctp/auth.c2
-rw-r--r--net/sunrpc/xprtsock.c7
-rw-r--r--net/unix/af_unix.c4
-rw-r--r--net/wireless/nl80211.c13
-rw-r--r--net/wireless/reg.c48
-rw-r--r--net/wireless/scan.c13
-rw-r--r--net/xfrm/xfrm_policy.c10
-rw-r--r--security/apparmor/path.c65
-rw-r--r--security/keys/encrypted-keys/Makefile8
-rw-r--r--security/keys/encrypted-keys/encrypted.c2
-rw-r--r--security/keys/encrypted-keys/encrypted.h3
-rw-r--r--security/keys/user_defined.c3
-rw-r--r--security/smack/smackfs.c115
-rw-r--r--security/tomoyo/realpath.c13
-rw-r--r--sound/core/vmaster.c18
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c2
-rw-r--r--sound/pci/hda/hda_codec.c70
-rw-r--r--sound/pci/hda/hda_codec.h1
-rw-r--r--sound/pci/hda/hda_eld.c41
-rw-r--r--sound/pci/hda/hda_intel.c5
-rw-r--r--sound/pci/hda/hda_local.h19
-rw-r--r--sound/pci/hda/patch_cirrus.c55
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_hdmi.c59
-rw-r--r--sound/pci/hda/patch_realtek.c114
-rw-r--r--sound/pci/hda/patch_sigmatel.c119
-rw-r--r--sound/pci/hda/patch_via.c76
-rw-r--r--sound/pci/intel8x0.c58
-rw-r--r--sound/pci/lx6464es/lx_core.c23
-rw-r--r--sound/pci/lx6464es/lx_core.h3
-rw-r--r--sound/pci/rme9652/hdspm.c2
-rw-r--r--sound/pci/sis7019.c64
-rw-r--r--sound/ppc/snd_ps3.c2
-rw-r--r--sound/soc/atmel/Kconfig21
-rw-r--r--sound/soc/atmel/Makefile4
-rw-r--r--sound/soc/atmel/playpaq_wm8510.c473
-rw-r--r--sound/soc/codecs/Kconfig2
-rw-r--r--sound/soc/codecs/ad1836.h2
-rw-r--r--sound/soc/codecs/adau1373.c2
-rw-r--r--sound/soc/codecs/cs4270.c10
-rw-r--r--sound/soc/codecs/cs4271.c8
-rw-r--r--sound/soc/codecs/cs42l51.c2
-rw-r--r--sound/soc/codecs/jz4740.c1
-rw-r--r--sound/soc/codecs/max9877.c10
-rw-r--r--sound/soc/codecs/rt5631.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c2
-rw-r--r--sound/soc/codecs/sta32x.c63
-rw-r--r--sound/soc/codecs/sta32x.h1
-rw-r--r--sound/soc/codecs/uda1380.c4
-rw-r--r--sound/soc/codecs/wm8731.c1
-rw-r--r--sound/soc/codecs/wm8753.c3
-rw-r--r--sound/soc/codecs/wm8958-dsp2.c2
-rw-r--r--sound/soc/codecs/wm8962.c4
-rw-r--r--sound/soc/codecs/wm8993.c2
-rw-r--r--sound/soc/codecs/wm8994.c62
-rw-r--r--sound/soc/codecs/wm8996.c1
-rw-r--r--sound/soc/codecs/wm9081.c10
-rw-r--r--sound/soc/codecs/wm9090.c6
-rw-r--r--sound/soc/codecs/wm_hubs.c2
-rw-r--r--sound/soc/fsl/fsl_ssi.c1
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c24
-rw-r--r--sound/soc/imx/Kconfig2
-rw-r--r--sound/soc/kirkwood/Kconfig3
-rw-r--r--sound/soc/mxs/mxs-pcm.c3
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c1
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c3
-rw-r--r--sound/soc/pxa/Kconfig3
-rw-r--r--sound/soc/pxa/hx4700.c5
-rw-r--r--sound/soc/samsung/jive_wm8750.c3
-rw-r--r--sound/soc/samsung/smdk2443_wm9710.c1
-rw-r--r--sound/soc/samsung/smdk_wm8994.c1
-rw-r--r--sound/soc/samsung/speyside.c2
-rw-r--r--sound/soc/soc-core.c6
-rw-r--r--sound/soc/soc-utils.c31
-rw-r--r--sound/usb/mixer.c110
-rw-r--r--sound/usb/quirks-table.h31
-rw-r--r--sound/usb/quirks.c7
-rw-r--r--tools/perf/builtin-stat.c3
-rw-r--r--tools/perf/util/evsel.c10
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/hist.c10
-rw-r--r--tools/perf/util/hist.h2
-rw-r--r--tools/perf/util/session.c4
-rw-r--r--tools/perf/util/trace-event-parse.c2
-rwxr-xr-xtools/testing/ktest/ktest.pl531
-rw-r--r--tools/testing/ktest/sample.conf146
1343 files changed, 15868 insertions, 11751 deletions
diff --git a/.mailmap b/.mailmap
index a4806f0de852..9b0d0267a3c3 100644
--- a/.mailmap
+++ b/.mailmap
@@ -68,6 +68,7 @@ Juha Yrjola <juha.yrjola@solidboot.com>
 Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
 Koushik <raghavendra.koushik@neterion.com>
+Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Linas Vepstas <linas@austin.ibm.com>
 Mark Brown <broonie@sirena.org.uk>
@@ -111,3 +112,4 @@ Uwe Kleine-König <ukl@pengutronix.de>
 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Yusuke Goda <goda.yusuke@renesas.com>
diff --git a/CREDITS b/CREDITS
index 07e32a87d956..44fce988eaac 100644
--- a/CREDITS
+++ b/CREDITS
@@ -688,10 +688,13 @@ S: Oxfordshire, UK.
 
 N: Kees Cook
 E: kees@outflux.net
-W: http://outflux.net/
-P: 1024D/17063E6D 9FA3 C49C 23C9 D1BC 2E30 1975 1FFF 4BA9 1706 3E6D
-D: Minor updates to SCSI types, added /proc/pid/maps protection
+E: kees@ubuntu.com
+E: keescook@chromium.org
+W: http://outflux.net/blog/
+P: 4096R/DC6DC026 A5C3 F68F 229D D60F 723E 6E13 8972 F4DF DC6D C026
+D: Various security things, bug fixes, and documentation.
 S: (ask for current address)
+S: Portland, Oregon
 S: USA
 
 N: Robin Cornelius
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index 2b5d56127fce..c1eb41cb9876 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -206,16 +206,3 @@ Description:
206 when a discarded area is read the discard_zeroes_data 206 when a discarded area is read the discard_zeroes_data
207 parameter will be set to one. Otherwise it will be 0 and 207 parameter will be set to one. Otherwise it will be 0 and
208 the result of reading a discarded area is undefined. 208 the result of reading a discarded area is undefined.
209What: /sys/block/<disk>/alias
210Date: Aug 2011
211Contact: Nao Nishijima <nao.nishijima.xt@hitachi.com>
212Description:
213 A raw device name of a disk does not always point a same disk
214 each boot-up time. Therefore, users have to use persistent
215 device names, which udev creates when the kernel finds a disk,
216 instead of raw device name. However, kernel doesn't show those
217 persistent names on its messages (e.g. dmesg).
218 This file can store an alias of the disk and it would be
219 appeared in kernel messages if it is set. A disk can have an
220 alias which length is up to 255bytes. Users can use alphabets,
221 numbers, "-" and "_" in alias name. This file is writeonce.
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
index fa72ccb2282e..dbedafb095e2 100644
--- a/Documentation/ABI/testing/sysfs-bus-rbd
+++ b/Documentation/ABI/testing/sysfs-bus-rbd
@@ -57,13 +57,6 @@ create_snap
57 57
58 $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create 58 $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
59 59
60rollback_snap
61
62 Rolls back data to the specified snapshot. This goes over the entire
63 list of rados blocks and sends a rollback command to each.
64
65 $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
66
67snap_* 60snap_*
68 61
69 A directory per each snapshot 62 A directory per each snapshot
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index c27915893974..196b8b9dba11 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -32,7 +32,7 @@
32 The Linux DRM layer contains code intended to support the needs 32 The Linux DRM layer contains code intended to support the needs
33 of complex graphics devices, usually containing programmable 33 of complex graphics devices, usually containing programmable
34 pipelines well suited to 3D graphics acceleration. Graphics 34 pipelines well suited to 3D graphics acceleration. Graphics
35 drivers in the kernel can make use of DRM functions to make 35 drivers in the kernel may make use of DRM functions to make
36 tasks like memory management, interrupt handling and DMA easier, 36 tasks like memory management, interrupt handling and DMA easier,
37 and provide a uniform interface to applications. 37 and provide a uniform interface to applications.
38 </para> 38 </para>
@@ -57,10 +57,10 @@
57 existing drivers. 57 existing drivers.
58 </para> 58 </para>
59 <para> 59 <para>
60 First, we'll go over some typical driver initialization 60 First, we go over some typical driver initialization
61 requirements, like setting up command buffers, creating an 61 requirements, like setting up command buffers, creating an
62 initial output configuration, and initializing core services. 62 initial output configuration, and initializing core services.
63 Subsequent sections will cover core internals in more detail, 63 Subsequent sections cover core internals in more detail,
64 providing implementation notes and examples. 64 providing implementation notes and examples.
65 </para> 65 </para>
66 <para> 66 <para>
@@ -74,7 +74,7 @@
74 </para> 74 </para>
75 <para> 75 <para>
76 The core of every DRM driver is struct drm_driver. Drivers 76 The core of every DRM driver is struct drm_driver. Drivers
77 will typically statically initialize a drm_driver structure, 77 typically statically initialize a drm_driver structure,
78 then pass it to drm_init() at load time. 78 then pass it to drm_init() at load time.
79 </para> 79 </para>
80 80
@@ -88,8 +88,8 @@
88 </para> 88 </para>
89 <programlisting> 89 <programlisting>
90 static struct drm_driver driver = { 90 static struct drm_driver driver = {
91 /* don't use mtrr's here, the Xserver or user space app should 91 /* Don't use MTRRs here; the Xserver or userspace app should
92 * deal with them for intel hardware. 92 * deal with them for Intel hardware.
93 */ 93 */
94 .driver_features = 94 .driver_features =
95 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | 95 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
@@ -154,8 +154,8 @@
154 </programlisting> 154 </programlisting>
155 <para> 155 <para>
156 In the example above, taken from the i915 DRM driver, the driver 156 In the example above, taken from the i915 DRM driver, the driver
157 sets several flags indicating what core features it supports. 157 sets several flags indicating what core features it supports;
158 We'll go over the individual callbacks in later sections. Since 158 we go over the individual callbacks in later sections. Since
159 flags indicate which features your driver supports to the DRM 159 flags indicate which features your driver supports to the DRM
160 core, you need to set most of them prior to calling drm_init(). Some, 160 core, you need to set most of them prior to calling drm_init(). Some,
161 like DRIVER_MODESET can be set later based on user supplied parameters, 161 like DRIVER_MODESET can be set later based on user supplied parameters,
@@ -203,8 +203,8 @@
203 <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term> 203 <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
204 <listitem> 204 <listitem>
205 <para> 205 <para>
206 DRIVER_HAVE_IRQ indicates whether the driver has a IRQ 206 DRIVER_HAVE_IRQ indicates whether the driver has an IRQ
207 handler, DRIVER_IRQ_SHARED indicates whether the device &amp; 207 handler. DRIVER_IRQ_SHARED indicates whether the device &amp;
208 handler support shared IRQs (note that this is required of 208 handler support shared IRQs (note that this is required of
209 PCI drivers). 209 PCI drivers).
210 </para> 210 </para>
@@ -214,8 +214,8 @@
214 <term>DRIVER_DMA_QUEUE</term> 214 <term>DRIVER_DMA_QUEUE</term>
215 <listitem> 215 <listitem>
216 <para> 216 <para>
217 If the driver queues DMA requests and completes them 217 Should be set if the driver queues DMA requests and completes them
218 asynchronously, this flag should be set. Deprecated. 218 asynchronously. Deprecated.
219 </para> 219 </para>
220 </listitem> 220 </listitem>
221 </varlistentry> 221 </varlistentry>
@@ -238,7 +238,7 @@
238 </variablelist> 238 </variablelist>
239 <para> 239 <para>
240 In this specific case, the driver requires AGP and supports 240 In this specific case, the driver requires AGP and supports
241 IRQs. DMA, as we'll see, is handled by device specific ioctls 241 IRQs. DMA, as discussed later, is handled by device-specific ioctls
242 in this case. It also supports the kernel mode setting APIs, though 242 in this case. It also supports the kernel mode setting APIs, though
243 unlike in the actual i915 driver source, this example unconditionally 243 unlike in the actual i915 driver source, this example unconditionally
244 exports KMS capability. 244 exports KMS capability.
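To make the flag discussion concrete, here is a minimal, illustrative sketch of a statically initialized driver structure and its load-time registration, following the drm_init() entry point named in the text above; the mydrv_* names are hypothetical and the exact registration call varies between kernel versions.

#include <linux/init.h>
#include <linux/module.h>
#include <drm/drmP.h>

/* Hypothetical load/unload hooks; a real driver sets up and tears down
 * hardware state here. */
static int mydrv_load(struct drm_device *dev, unsigned long flags)
{
        return 0;
}

static int mydrv_unload(struct drm_device *dev)
{
        return 0;
}

static struct drm_driver mydrv_driver = {
        /* Feature flags, as in the i915 example above. */
        .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
                           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
        .load   = mydrv_load,
        .unload = mydrv_unload,
};

static int __init mydrv_init(void)
{
        /* Hand the statically initialized structure to the DRM core. */
        return drm_init(&mydrv_driver);
}
module_init(mydrv_init);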
@@ -269,36 +269,34 @@
269 initial output configuration. 269 initial output configuration.
270 </para> 270 </para>
271 <para> 271 <para>
272 Note that the tasks performed at driver load time must not 272 If compatibility is a concern (e.g. with drivers converted over
273 conflict with DRM client requirements. For instance, if user 273 to the new interfaces from the old ones), care must be taken to
274 prevent device initialization and control that is incompatible with
275 currently active userspace drivers. For instance, if user
274 level mode setting drivers are in use, it would be problematic 276 level mode setting drivers are in use, it would be problematic
275 to perform output discovery &amp; configuration at load time. 277 to perform output discovery &amp; configuration at load time.
276 Likewise, if pre-memory management aware user level drivers are 278 Likewise, if user-level drivers unaware of memory management are
277 in use, memory management and command buffer setup may need to 279 in use, memory management and command buffer setup may need to
278 be omitted. These requirements are driver specific, and care 280 be omitted. These requirements are driver-specific, and care
279 needs to be taken to keep both old and new applications and 281 needs to be taken to keep both old and new applications and
280 libraries working. The i915 driver supports the "modeset" 282 libraries working. The i915 driver supports the "modeset"
281 module parameter to control whether advanced features are 283 module parameter to control whether advanced features are
282 enabled at load time or in legacy fashion. If compatibility is 284 enabled at load time or in legacy fashion.
283 a concern (e.g. with drivers converted over to the new interfaces
284 from the old ones), care must be taken to prevent incompatible
285 device initialization and control with the currently active
286 userspace drivers.
287 </para> 285 </para>
288 286
289 <sect2> 287 <sect2>
290 <title>Driver private &amp; performance counters</title> 288 <title>Driver private &amp; performance counters</title>
291 <para> 289 <para>
292 The driver private hangs off the main drm_device structure and 290 The driver private hangs off the main drm_device structure and
293 can be used for tracking various device specific bits of 291 can be used for tracking various device-specific bits of
294 information, like register offsets, command buffer status, 292 information, like register offsets, command buffer status,
295 register state for suspend/resume, etc. At load time, a 293 register state for suspend/resume, etc. At load time, a
296 driver can simply allocate one and set drm_device.dev_priv 294 driver may simply allocate one and set drm_device.dev_priv
297 appropriately; at unload the driver can free it and set 295 appropriately; it should be freed and drm_device.dev_priv set
298 drm_device.dev_priv to NULL. 296 to NULL when the driver is unloaded.
299 </para> 297 </para>
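A minimal sketch of the allocate-at-load / free-at-unload pattern described above might look like this; the contents of the private structure are hypothetical, and note that the member is spelled dev_private in struct drm_device.

#include <linux/slab.h>
#include <drm/drmP.h>

/* Hypothetical driver-private state. */
struct mydrv_private {
        void __iomem *mmio;
        u32 saved_regs[16];
};

static int mydrv_load(struct drm_device *dev, unsigned long flags)
{
        struct mydrv_private *dev_priv;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (!dev_priv)
                return -ENOMEM;
        dev->dev_private = dev_priv;    /* hang it off the drm_device */
        return 0;
}

static int mydrv_unload(struct drm_device *dev)
{
        kfree(dev->dev_private);        /* freed and cleared at unload */
        dev->dev_private = NULL;
        return 0;
}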
300 <para> 298 <para>
301 The DRM supports several counters which can be used for rough 299 The DRM supports several counters which may be used for rough
302 performance characterization. Note that the DRM stat counter 300 performance characterization. Note that the DRM stat counter
303 system is not often used by applications, and supporting 301 system is not often used by applications, and supporting
304 additional counters is completely optional. 302 additional counters is completely optional.
@@ -307,15 +305,15 @@
307 These interfaces are deprecated and should not be used. If performance 305 These interfaces are deprecated and should not be used. If performance
308 monitoring is desired, the developer should investigate and 306 monitoring is desired, the developer should investigate and
309 potentially enhance the kernel perf and tracing infrastructure to export 307 potentially enhance the kernel perf and tracing infrastructure to export
310 GPU related performance information to performance monitoring 308 GPU related performance information for consumption by performance
311 tools and applications. 309 monitoring tools and applications.
312 </para> 310 </para>
313 </sect2> 311 </sect2>
314 312
315 <sect2> 313 <sect2>
316 <title>Configuring the device</title> 314 <title>Configuring the device</title>
317 <para> 315 <para>
318 Obviously, device configuration will be device specific. 316 Obviously, device configuration is device-specific.
319 However, there are several common operations: finding a 317 However, there are several common operations: finding a
320 device's PCI resources, mapping them, and potentially setting 318 device's PCI resources, mapping them, and potentially setting
321 up an IRQ handler. 319 up an IRQ handler.
@@ -323,10 +321,10 @@
323 <para> 321 <para>
324 Finding &amp; mapping resources is fairly straightforward. The 322 Finding &amp; mapping resources is fairly straightforward. The
325 DRM wrapper functions, drm_get_resource_start() and 323 DRM wrapper functions, drm_get_resource_start() and
326 drm_get_resource_len() can be used to find BARs on the given 324 drm_get_resource_len(), may be used to find BARs on the given
327 drm_device struct. Once those values have been retrieved, the 325 drm_device struct. Once those values have been retrieved, the
328 driver load function can call drm_addmap() to create a new 326 driver load function can call drm_addmap() to create a new
329 mapping for the BAR in question. Note you'll probably want a 327 mapping for the BAR in question. Note that you probably want a
330 drm_local_map_t in your driver private structure to track any 328 drm_local_map_t in your driver private structure to track any
331 mappings you create. 329 mappings you create.
332<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* --> 330<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* -->
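Below is a sketch of the BAR lookup and mapping sequence just described, using the wrapper functions named in the text; the choice of BAR 0 and of the map flags is an assumption. The returned drm_local_map_t can then be used with the DRM_READn()/DRM_WRITEn() macros mentioned below.

#include <drm/drmP.h>

/* Find BAR 0 of the device and create a register mapping for it,
 * storing the map pointer (typically kept in the driver private). */
static int mydrv_map_registers(struct drm_device *dev, drm_local_map_t **map)
{
        resource_size_t base = drm_get_resource_start(dev, 0);
        resource_size_t len  = drm_get_resource_len(dev, 0);

        return drm_addmap(dev, base, len, _DRM_REGISTERS, _DRM_DRIVER, map);
}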
@@ -335,20 +333,20 @@
335 <para> 333 <para>
336 if compatibility with other operating systems isn't a concern 334 if compatibility with other operating systems isn't a concern
337 (DRM drivers can run under various BSD variants and OpenSolaris), 335 (DRM drivers can run under various BSD variants and OpenSolaris),
338 native Linux calls can be used for the above, e.g. pci_resource_* 336 native Linux calls may be used for the above, e.g. pci_resource_*
339 and iomap*/iounmap. See the Linux device driver book for more 337 and iomap*/iounmap. See the Linux device driver book for more
340 info. 338 info.
341 </para> 339 </para>
342 <para> 340 <para>
343 Once you have a register map, you can use the DRM_READn() and 341 Once you have a register map, you may use the DRM_READn() and
344 DRM_WRITEn() macros to access the registers on your device, or 342 DRM_WRITEn() macros to access the registers on your device, or
345 use driver specific versions to offset into your MMIO space 343 use driver-specific versions to offset into your MMIO space
346 relative to a driver specific base pointer (see I915_READ for 344 relative to a driver-specific base pointer (see I915_READ for
347 example). 345 an example).
348 </para> 346 </para>
349 <para> 347 <para>
350 If your device supports interrupt generation, you may want to 348 If your device supports interrupt generation, you may want to
351 setup an interrupt handler at driver load time as well. This 349 set up an interrupt handler when the driver is loaded. This
352 is done using the drm_irq_install() function. If your device 350 is done using the drm_irq_install() function. If your device
353 supports vertical blank interrupts, it should call 351 supports vertical blank interrupts, it should call
354 drm_vblank_init() to initialize the core vblank handling code before 352 drm_vblank_init() to initialize the core vblank handling code before
@@ -357,7 +355,7 @@
357 </para> 355 </para>
358<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install--> 356<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
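A sketch of that interrupt setup, typically called from the driver load hook, follows; the CRTC count of 2 is an assumption.

#include <drm/drmP.h>

static int mydrv_irq_init(struct drm_device *dev)
{
        int ret;

        /* Initialize core vblank handling before enabling interrupts. */
        ret = drm_vblank_init(dev, 2);
        if (ret)
                return ret;

        /* Registers drm_driver.irq_handler and enables the device IRQ. */
        return drm_irq_install(dev);
}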
359 <para> 357 <para>
360 Once your interrupt handler is registered (it'll use your 358 Once your interrupt handler is registered (it uses your
361 drm_driver.irq_handler as the actual interrupt handling 359 drm_driver.irq_handler as the actual interrupt handling
362 function), you can safely enable interrupts on your device, 360 function), you can safely enable interrupts on your device,
363 assuming any other state your interrupt handler uses is also 361 assuming any other state your interrupt handler uses is also
@@ -371,10 +369,10 @@
371 using the pci_map_rom() call, a convenience function that 369 using the pci_map_rom() call, a convenience function that
372 takes care of mapping the actual ROM, whether it has been 370 takes care of mapping the actual ROM, whether it has been
373 shadowed into memory (typically at address 0xc0000) or exists 371 shadowed into memory (typically at address 0xc0000) or exists
374 on the PCI device in the ROM BAR. Note that once you've 372 on the PCI device in the ROM BAR. Note that after the ROM
375 mapped the ROM and extracted any necessary information, be 373 has been mapped and any necessary information has been extracted,
376 sure to unmap it; on many devices the ROM address decoder is 374 it should be unmapped; on many devices, the ROM address decoder is
377 shared with other BARs, so leaving it mapped can cause 375 shared with other BARs, so leaving it mapped could cause
378 undesired behavior like hangs or memory corruption. 376 undesired behavior like hangs or memory corruption.
379<!--!Fdrivers/pci/rom.c pci_map_rom--> 377<!--!Fdrivers/pci/rom.c pci_map_rom-->
380 </para> 378 </para>
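For illustration, here is a sketch of the map, copy, and unmap sequence recommended above; the buffer handling is made up.

#include <linux/pci.h>
#include <linux/io.h>
#include <linux/kernel.h>

/* Copy the start of the video ROM, then release the mapping promptly,
 * since the ROM address decoder may be shared with other BARs. */
static int mydrv_read_rom(struct pci_dev *pdev, void *buf, size_t buflen)
{
        size_t size;
        void __iomem *rom = pci_map_rom(pdev, &size);

        if (!rom)
                return -ENODEV;
        memcpy_fromio(buf, rom, min(buflen, size));
        pci_unmap_rom(pdev, rom);
        return 0;
}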
@@ -389,9 +387,9 @@
389 should support a memory manager. 387 should support a memory manager.
390 </para> 388 </para>
391 <para> 389 <para>
392 If your driver supports memory management (it should!), you'll 390 If your driver supports memory management (it should!), you
393 need to set that up at load time as well. How you initialize 391 need to set that up at load time as well. How you initialize
394 it depends on which memory manager you're using, TTM or GEM. 392 it depends on which memory manager you're using: TTM or GEM.
395 </para> 393 </para>
396 <sect3> 394 <sect3>
397 <title>TTM initialization</title> 395 <title>TTM initialization</title>
@@ -401,7 +399,7 @@
401 and devices with dedicated video RAM (VRAM), i.e. most discrete 399 and devices with dedicated video RAM (VRAM), i.e. most discrete
402 graphics devices. If your device has dedicated RAM, supporting 400 graphics devices. If your device has dedicated RAM, supporting
403 TTM is desirable. TTM also integrates tightly with your 401 TTM is desirable. TTM also integrates tightly with your
404 driver specific buffer execution function. See the radeon 402 driver-specific buffer execution function. See the radeon
405 driver for examples. 403 driver for examples.
406 </para> 404 </para>
407 <para> 405 <para>
@@ -429,21 +427,21 @@
429 created by the memory manager at runtime. Your global TTM should 427 created by the memory manager at runtime. Your global TTM should
430 have a type of TTM_GLOBAL_TTM_MEM. The size field for the global 428 have a type of TTM_GLOBAL_TTM_MEM. The size field for the global
431 object should be sizeof(struct ttm_mem_global), and the init and 429 object should be sizeof(struct ttm_mem_global), and the init and
432 release hooks should point at your driver specific init and 430 release hooks should point at your driver-specific init and
433 release routines, which will probably eventually call 431 release routines, which probably eventually call
434 ttm_mem_global_init and ttm_mem_global_release respectively. 432 ttm_mem_global_init and ttm_mem_global_release, respectively.
435 </para> 433 </para>
436 <para> 434 <para>
437 Once your global TTM accounting structure is set up and initialized 435 Once your global TTM accounting structure is set up and initialized
438 (done by calling ttm_global_item_ref on the global object you 436 by calling ttm_global_item_ref() on it,
439 just created), you'll need to create a buffer object TTM to 437 you need to create a buffer object TTM to
440 provide a pool for buffer object allocation by clients and the 438 provide a pool for buffer object allocation by clients and the
441 kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO, 439 kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO,
442 and its size should be sizeof(struct ttm_bo_global). Again, 440 and its size should be sizeof(struct ttm_bo_global). Again,
443 driver specific init and release functions can be provided, 441 driver-specific init and release functions may be provided,
444 likely eventually calling ttm_bo_global_init and 442 likely eventually calling ttm_bo_global_init() and
445 ttm_bo_global_release, respectively. Also like the previous 443 ttm_bo_global_release(), respectively. Also, like the previous
446 object, ttm_global_item_ref is used to create an initial reference 444 object, ttm_global_item_ref() is used to create an initial reference
447 count for the TTM, which will call your initialization function. 445 count for the TTM, which will call your initialization function.
448 </para> 446 </para>
449 </sect3> 447 </sect3>
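A sketch of the TTM_GLOBAL_TTM_MEM setup walked through above, modeled on the radeon driver, might look like this; it uses the ttm_global_* names from this text (later kernels spell them drm_global_*), and the driver-private structure is hypothetical.

#include "ttm/ttm_module.h"
#include "ttm/ttm_memory.h"

struct mydrv_private {
        struct ttm_global_reference mem_global_ref;
};

static int mydrv_ttm_mem_global_init(struct ttm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void mydrv_ttm_mem_global_release(struct ttm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

static int mydrv_ttm_global_init(struct mydrv_private *dev_priv)
{
        struct ttm_global_reference *ref = &dev_priv->mem_global_ref;

        ref->global_type = TTM_GLOBAL_TTM_MEM;
        ref->size        = sizeof(struct ttm_mem_global);
        ref->init        = &mydrv_ttm_mem_global_init;
        ref->release     = &mydrv_ttm_mem_global_release;

        /* Takes the initial reference and calls the init hook above. */
        return ttm_global_item_ref(ref);
}

The TTM_GLOBAL_TTM_BO object is set up the same way, with wrappers around ttm_bo_global_init() and ttm_bo_global_release() as the hooks.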
@@ -453,27 +451,26 @@
453 GEM is an alternative to TTM, designed specifically for UMA 451 GEM is an alternative to TTM, designed specifically for UMA
454 devices. It has simpler initialization and execution requirements 452 devices. It has simpler initialization and execution requirements
455 than TTM, but has no VRAM management capability. Core GEM 453 than TTM, but has no VRAM management capability. Core GEM
456 initialization is comprised of a basic drm_mm_init call to create 454 is initialized by calling drm_mm_init() to create
457 a GTT DRM MM object, which provides an address space pool for 455 a GTT DRM MM object, which provides an address space pool for
458 object allocation. In a KMS configuration, the driver will 456 object allocation. In a KMS configuration, the driver
459 need to allocate and initialize a command ring buffer following 457 needs to allocate and initialize a command ring buffer following
460 basic GEM initialization. Most UMA devices have a so-called 458 core GEM initialization. A UMA device usually has what is called a
461 "stolen" memory region, which provides space for the initial 459 "stolen" memory region, which provides space for the initial
462 framebuffer and large, contiguous memory regions required by the 460 framebuffer and large, contiguous memory regions required by the
463 device. This space is not typically managed by GEM, and must 461 device. This space is not typically managed by GEM, and it must
464 be initialized separately into its own DRM MM object. 462 be initialized separately into its own DRM MM object.
465 </para> 463 </para>
466 <para> 464 <para>
467 Initialization will be driver specific, and will depend on 465 Initialization is driver-specific. In the case of Intel
468 the architecture of the device. In the case of Intel
469 integrated graphics chips like 965GM, GEM initialization can 466 integrated graphics chips like 965GM, GEM initialization can
470 be done by calling the internal GEM init function, 467 be done by calling the internal GEM init function,
471 i915_gem_do_init(). Since the 965GM is a UMA device 468 i915_gem_do_init(). Since the 965GM is a UMA device
472 (i.e. it doesn't have dedicated VRAM), GEM will manage 469 (i.e. it doesn't have dedicated VRAM), GEM manages
473 making regular RAM available for GPU operations. Memory set 470 making regular RAM available for GPU operations. Memory set
474 aside by the BIOS (called "stolen" memory by the i915 471 aside by the BIOS (called "stolen" memory by the i915
475 driver) will be managed by the DRM memrange allocator; the 472 driver) is managed by the DRM memrange allocator; the
476 rest of the aperture will be managed by GEM. 473 rest of the aperture is managed by GEM.
477 <programlisting> 474 <programlisting>
478 /* Basic memrange allocator for stolen space (aka vram) */ 475 /* Basic memrange allocator for stolen space (aka vram) */
479 drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size); 476 drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size);
@@ -483,7 +480,7 @@
483<!--!Edrivers/char/drm/drm_memrange.c--> 480<!--!Edrivers/char/drm/drm_memrange.c-->
484 </para> 481 </para>
485 <para> 482 <para>
486 Once the memory manager has been set up, we can allocate the 483 Once the memory manager has been set up, we may allocate the
487 command buffer. In the i915 case, this is also done with a 484 command buffer. In the i915 case, this is also done with a
488 GEM function, i915_gem_init_ringbuffer(). 485 GEM function, i915_gem_init_ringbuffer().
489 </para> 486 </para>
@@ -493,16 +490,25 @@
493 <sect2> 490 <sect2>
494 <title>Output configuration</title> 491 <title>Output configuration</title>
495 <para> 492 <para>
496 The final initialization task is output configuration. This involves 493 The final initialization task is output configuration. This involves:
497 finding and initializing the CRTCs, encoders and connectors 494 <itemizedlist>
498 for your device, creating an initial configuration and 495 <listitem>
499 registering a framebuffer console driver. 496 Finding and initializing the CRTCs, encoders, and connectors
497 for the device.
498 </listitem>
499 <listitem>
500 Creating an initial configuration.
501 </listitem>
502 <listitem>
503 Registering a framebuffer console driver.
504 </listitem>
505 </itemizedlist>
500 </para> 506 </para>
501 <sect3> 507 <sect3>
502 <title>Output discovery and initialization</title> 508 <title>Output discovery and initialization</title>
503 <para> 509 <para>
504 Several core functions exist to create CRTCs, encoders and 510 Several core functions exist to create CRTCs, encoders, and
505 connectors, namely drm_crtc_init(), drm_connector_init() and 511 connectors, namely: drm_crtc_init(), drm_connector_init(), and
506 drm_encoder_init(), along with several "helper" functions to 512 drm_encoder_init(), along with several "helper" functions to
507 perform common tasks. 513 perform common tasks.
508 </para> 514 </para>
@@ -555,10 +561,10 @@ void intel_crt_init(struct drm_device *dev)
555 </programlisting> 561 </programlisting>
556 <para> 562 <para>
557 In the example above (again, taken from the i915 driver), a 563 In the example above (again, taken from the i915 driver), a
558 CRT connector and encoder combination is created. A device 564 CRT connector and encoder combination is created. A device-specific
559 specific i2c bus is also created, for fetching EDID data and 565 i2c bus is also created for fetching EDID data and
560 performing monitor detection. Once the process is complete, 566 performing monitor detection. Once the process is complete,
561 the new connector is registered with sysfs, to make its 567 the new connector is registered with sysfs to make its
562 properties available to applications. 568 properties available to applications.
563 </para> 569 </para>
564 <sect4> 570 <sect4>
@@ -567,12 +573,12 @@ void intel_crt_init(struct drm_device *dev)
567 Since many PC-class graphics devices have similar display output 573 Since many PC-class graphics devices have similar display output
568 designs, the DRM provides a set of helper functions to make 574 designs, the DRM provides a set of helper functions to make
569 output management easier. The core helper routines handle 575 output management easier. The core helper routines handle
570 encoder re-routing and disabling of unused functions following 576 encoder re-routing and the disabling of unused functions following
571 mode set. Using the helpers is optional, but recommended for 577 mode setting. Using the helpers is optional, but recommended for
572 devices with PC-style architectures (i.e. a set of display planes 578 devices with PC-style architectures (i.e. a set of display planes
573 for feeding pixels to encoders which are in turn routed to 579 for feeding pixels to encoders which are in turn routed to
574 connectors). Devices with more complex requirements needing 580 connectors). Devices with more complex requirements needing
575 finer grained management can opt to use the core callbacks 581 finer grained management may opt to use the core callbacks
576 directly. 582 directly.
577 </para> 583 </para>
578 <para> 584 <para>
@@ -580,17 +586,25 @@ void intel_crt_init(struct drm_device *dev)
580 </para> 586 </para>
581 </sect4> 587 </sect4>
582 <para> 588 <para>
583 For each encoder, CRTC and connector, several functions must 589 Each encoder object needs to provide:
584 be provided, depending on the object type. Encoder objects 590 <itemizedlist>
585 need to provide a DPMS (basically on/off) function, mode fixup 591 <listitem>
586 (for converting requested modes into native hardware timings), 592 A DPMS (basically on/off) function.
587 and prepare, set and commit functions for use by the core DRM 593 </listitem>
588 helper functions. Connector helpers need to provide mode fetch and 594 <listitem>
589 validity functions as well as an encoder matching function for 595 A mode-fixup function (for converting requested modes into
590 returning an ideal encoder for a given connector. The core 596 native hardware timings).
591 connector functions include a DPMS callback, (deprecated) 597 </listitem>
592 save/restore routines, detection, mode probing, property handling, 598 <listitem>
593 and cleanup functions. 599 Functions (prepare, set, and commit) for use by the core DRM
600 helper functions.
601 </listitem>
602 </itemizedlist>
603 Connector helpers need to provide functions (mode-fetch, validity,
604 and encoder-matching) for returning an ideal encoder for a given
605 connector. The core connector functions include a DPMS callback,
606 save/restore routines (deprecated), detection, mode probing,
607 property handling, and cleanup functions.
594 </para> 608 </para>
595<!--!Edrivers/char/drm/drm_crtc.h--> 609<!--!Edrivers/char/drm/drm_crtc.h-->
596<!--!Edrivers/char/drm/drm_crtc.c--> 610<!--!Edrivers/char/drm/drm_crtc.c-->
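As a rough sketch of the per-encoder callbacks listed above, wired up through the helper layer; every mydrv_* body here is a placeholder.

#include <drm/drm_crtc_helper.h>

static void mydrv_encoder_dpms(struct drm_encoder *encoder, int mode)
{
        /* Turn the encoder on or off according to the DPMS mode. */
}

static bool mydrv_encoder_mode_fixup(struct drm_encoder *encoder,
                                     struct drm_display_mode *mode,
                                     struct drm_display_mode *adjusted_mode)
{
        return true;    /* requested mode already matches native timings */
}

static void mydrv_encoder_prepare(struct drm_encoder *encoder)
{
}

static void mydrv_encoder_mode_set(struct drm_encoder *encoder,
                                   struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted_mode)
{
}

static void mydrv_encoder_commit(struct drm_encoder *encoder)
{
}

static const struct drm_encoder_helper_funcs mydrv_encoder_helper_funcs = {
        .dpms       = mydrv_encoder_dpms,
        .mode_fixup = mydrv_encoder_mode_fixup,
        .prepare    = mydrv_encoder_prepare,
        .mode_set   = mydrv_encoder_mode_set,
        .commit     = mydrv_encoder_commit,
};

A driver attaches these to each encoder it creates with drm_encoder_helper_add().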
@@ -605,23 +619,34 @@ void intel_crt_init(struct drm_device *dev)
605 <title>VBlank event handling</title> 619 <title>VBlank event handling</title>
606 <para> 620 <para>
607 The DRM core exposes two vertical blank related ioctls: 621 The DRM core exposes two vertical blank related ioctls:
608 DRM_IOCTL_WAIT_VBLANK and DRM_IOCTL_MODESET_CTL. 622 <variablelist>
623 <varlistentry>
624 <term>DRM_IOCTL_WAIT_VBLANK</term>
625 <listitem>
626 <para>
627 This takes a struct drm_wait_vblank structure as its argument,
628 and it is used to block or request a signal when a specified
629 vblank event occurs.
630 </para>
631 </listitem>
632 </varlistentry>
633 <varlistentry>
634 <term>DRM_IOCTL_MODESET_CTL</term>
635 <listitem>
636 <para>
637 This should be called by application level drivers before and
638 after mode setting, since on many devices the vertical blank
639 counter is reset at that time. Internally, the DRM snapshots
640 the last vblank count when the ioctl is called with the
641 _DRM_PRE_MODESET command, so that the counter won't go backwards
642 (which is dealt with when _DRM_POST_MODESET is used).
643 </para>
644 </listitem>
645 </varlistentry>
646 </variablelist>
609<!--!Edrivers/char/drm/drm_irq.c--> 647<!--!Edrivers/char/drm/drm_irq.c-->
610 </para> 648 </para>
611 <para> 649 <para>
612 DRM_IOCTL_WAIT_VBLANK takes a struct drm_wait_vblank structure
613 as its argument, and is used to block or request a signal when a
614 specified vblank event occurs.
615 </para>
616 <para>
617 DRM_IOCTL_MODESET_CTL should be called by application level
618 drivers before and after mode setting, since on many devices the
619 vertical blank counter will be reset at that time. Internally,
620 the DRM snapshots the last vblank count when the ioctl is called
621 with the _DRM_PRE_MODESET command so that the counter won't go
622 backwards (which is dealt with when _DRM_POST_MODESET is used).
623 </para>
624 <para>
625 To support the functions above, the DRM core provides several 650 To support the functions above, the DRM core provides several
626 helper functions for tracking vertical blank counters, and 651 helper functions for tracking vertical blank counters, and
627 requires drivers to provide several callbacks: 652 requires drivers to provide several callbacks:
@@ -632,24 +657,24 @@ void intel_crt_init(struct drm_device *dev)
632 register. The enable and disable vblank callbacks should enable 657 register. The enable and disable vblank callbacks should enable
633 and disable vertical blank interrupts, respectively. In the 658 and disable vertical blank interrupts, respectively. In the
634 absence of DRM clients waiting on vblank events, the core DRM 659 absence of DRM clients waiting on vblank events, the core DRM
635 code will use the disable_vblank() function to disable 660 code uses the disable_vblank() function to disable
636 interrupts, which saves power. They'll be re-enabled again when 661 interrupts, which saves power. They are re-enabled again when
637 a client calls the vblank wait ioctl above. 662 a client calls the vblank wait ioctl above.
638 </para> 663 </para>
639 <para> 664 <para>
640 Devices that don't provide a count register can simply use an 665 A device that doesn't provide a count register may simply use an
641 internal atomic counter incremented on every vertical blank 666 internal atomic counter incremented on every vertical blank
642 interrupt, and can make their enable and disable vblank 667 interrupt (and then treat the enable_vblank() and disable_vblank()
643 functions into no-ops. 668 callbacks as no-ops).
644 </para> 669 </para>
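A sketch of the software-counter approach from the paragraph above, for hardware with no vblank count register, could look like this; a real driver would keep one counter per CRTC.

#include <linux/atomic.h>
#include <drm/drmP.h>

static atomic_t mydrv_vblank_count = ATOMIC_INIT(0);

static u32 mydrv_get_vblank_counter(struct drm_device *dev, int crtc)
{
        return atomic_read(&mydrv_vblank_count);
}

static int mydrv_enable_vblank(struct drm_device *dev, int crtc)
{
        return 0;       /* the vblank interrupt stays enabled; nothing to do */
}

static void mydrv_disable_vblank(struct drm_device *dev, int crtc)
{
}

/* Called from the driver's interrupt handler on every vertical blank. */
static void mydrv_handle_vblank_irq(struct drm_device *dev)
{
        atomic_inc(&mydrv_vblank_count);
        drm_handle_vblank(dev, 0);
}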
645 </sect1> 670 </sect1>
646 671
647 <sect1> 672 <sect1>
648 <title>Memory management</title> 673 <title>Memory management</title>
649 <para> 674 <para>
650 The memory manager lies at the heart of many DRM operations, and 675 The memory manager lies at the heart of many DRM operations; it
651 is also required to support advanced client features like OpenGL 676 is required to support advanced client features like OpenGL
652 pbuffers. The DRM currently contains two memory managers, TTM 677 pbuffers. The DRM currently contains two memory managers: TTM
653 and GEM. 678 and GEM.
654 </para> 679 </para>
655 680
@@ -679,41 +704,46 @@ void intel_crt_init(struct drm_device *dev)
679 <para> 704 <para>
680 GEM-enabled drivers must provide gem_init_object() and 705 GEM-enabled drivers must provide gem_init_object() and
681 gem_free_object() callbacks to support the core memory 706 gem_free_object() callbacks to support the core memory
682 allocation routines. They should also provide several driver 707 allocation routines. They should also provide several driver-specific
683 specific ioctls to support command execution, pinning, buffer 708 ioctls to support command execution, pinning, buffer
684 read &amp; write, mapping, and domain ownership transfers. 709 read &amp; write, mapping, and domain ownership transfers.
685 </para> 710 </para>
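Here is a minimal sketch of the two GEM callbacks named above, for a driver that keeps no per-object private state; the free path assumes the object was initialized with the core GEM helpers.

#include <linux/slab.h>
#include <drm/drmP.h>

static int mydrv_gem_init_object(struct drm_gem_object *obj)
{
        /* Nothing driver-specific to set up in this sketch. */
        return 0;
}

static void mydrv_gem_free_object(struct drm_gem_object *obj)
{
        drm_gem_object_release(obj);    /* release the backing storage */
        kfree(obj);
}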
686 <para> 711 <para>
687 On a fundamental level, GEM involves several operations: memory 712 On a fundamental level, GEM involves several operations:
688 allocation and freeing, command execution, and aperture management 713 <itemizedlist>
689 at command execution time. Buffer object allocation is relatively 714 <listitem>Memory allocation and freeing</listitem>
715 <listitem>Command execution</listitem>
716 <listitem>Aperture management at command execution time</listitem>
717 </itemizedlist>
718 Buffer object allocation is relatively
690 straightforward and largely provided by Linux's shmem layer, which 719 straightforward and largely provided by Linux's shmem layer, which
691 provides memory to back each object. When mapped into the GTT 720 provides memory to back each object. When mapped into the GTT
692 or used in a command buffer, the backing pages for an object are 721 or used in a command buffer, the backing pages for an object are
693 flushed to memory and marked write combined so as to be coherent 722 flushed to memory and marked write combined so as to be coherent
694 with the GPU. Likewise, when the GPU finishes rendering to an object, 723 with the GPU. Likewise, if the CPU accesses an object after the GPU
695 if the CPU accesses it, it must be made coherent with the CPU's view 724 has finished rendering to the object, then the object must be made
725 coherent with the CPU's view
696 of memory, usually involving GPU cache flushing of various kinds. 726 of memory, usually involving GPU cache flushing of various kinds.
697 This core CPU&lt;-&gt;GPU coherency management is provided by the GEM 727 This core CPU&lt;-&gt;GPU coherency management is provided by a
698 set domain function, which evaluates an object's current domain and 728 device-specific ioctl, which evaluates an object's current domain and
699 performs any necessary flushing or synchronization to put the object 729 performs any necessary flushing or synchronization to put the object
700 into the desired coherency domain (note that the object may be busy, 730 into the desired coherency domain (note that the object may be busy,
701 i.e. an active render target; in that case the set domain function 731 i.e. an active render target; in that case, setting the domain
702 will block the client and wait for rendering to complete before 732 blocks the client and waits for rendering to complete before
703 performing any necessary flushing operations). 733 performing any necessary flushing operations).
704 </para> 734 </para>
705 <para> 735 <para>
706 Perhaps the most important GEM function is providing a command 736 Perhaps the most important GEM function is providing a command
707 execution interface to clients. Client programs construct command 737 execution interface to clients. Client programs construct command
708 buffers containing references to previously allocated memory objects 738 buffers containing references to previously allocated memory objects,
709 and submit them to GEM. At that point, GEM will take care to bind 739 and then submit them to GEM. At that point, GEM takes care to bind
710 all the objects into the GTT, execute the buffer, and provide 740 all the objects into the GTT, execute the buffer, and provide
711 necessary synchronization between clients accessing the same buffers. 741 necessary synchronization between clients accessing the same buffers.
712 This often involves evicting some objects from the GTT and re-binding 742 This often involves evicting some objects from the GTT and re-binding
713 others (a fairly expensive operation), and providing relocation 743 others (a fairly expensive operation), and providing relocation
714 support which hides fixed GTT offsets from clients. Clients must 744 support which hides fixed GTT offsets from clients. Clients must
715 take care not to submit command buffers that reference more objects 745 take care not to submit command buffers that reference more objects
716 than can fit in the GTT or GEM will reject them and no rendering 746 than can fit in the GTT; otherwise, GEM will reject them and no rendering
717 will occur. Similarly, if several objects in the buffer require 747 will occur. Similarly, if several objects in the buffer require
718 fence registers to be allocated for correct rendering (e.g. 2D blits 748 fence registers to be allocated for correct rendering (e.g. 2D blits
719 on pre-965 chips), care must be taken not to require more fence 749 on pre-965 chips), care must be taken not to require more fence
@@ -729,7 +759,7 @@ void intel_crt_init(struct drm_device *dev)
729 <title>Output management</title> 759 <title>Output management</title>
730 <para> 760 <para>
731 At the core of the DRM output management code is a set of 761 At the core of the DRM output management code is a set of
732 structures representing CRTCs, encoders and connectors. 762 structures representing CRTCs, encoders, and connectors.
733 </para> 763 </para>
734 <para> 764 <para>
735 A CRTC is an abstraction representing a part of the chip that 765 A CRTC is an abstraction representing a part of the chip that
@@ -765,21 +795,19 @@ void intel_crt_init(struct drm_device *dev)
765 <sect1> 795 <sect1>
766 <title>Framebuffer management</title> 796 <title>Framebuffer management</title>
767 <para> 797 <para>
768 In order to set a mode on a given CRTC, encoder and connector 798 Clients need to provide a framebuffer object which provides a source
769 configuration, clients need to provide a framebuffer object which 799 of pixels for a CRTC to deliver to the encoder(s) and ultimately the
770 will provide a source of pixels for the CRTC to deliver to the encoder(s) 800 connector(s). A framebuffer is fundamentally a driver-specific memory
771 and ultimately the connector(s) in the configuration. A framebuffer 801 object, made into an opaque handle by the DRM's addfb() function.
772 is fundamentally a driver specific memory object, made into an opaque 802 Once a framebuffer has been created this way, it may be passed to the
773 handle by the DRM addfb function. Once an fb has been created this 803 KMS mode setting routines for use in a completed configuration.
774 way it can be passed to the KMS mode setting routines for use in
775 a configuration.
776 </para> 804 </para>
777 </sect1> 805 </sect1>
778 806
779 <sect1> 807 <sect1>
780 <title>Command submission &amp; fencing</title> 808 <title>Command submission &amp; fencing</title>
781 <para> 809 <para>
782 This should cover a few device specific command submission 810 This should cover a few device-specific command submission
783 implementations. 811 implementations.
784 </para> 812 </para>
785 </sect1> 813 </sect1>
@@ -789,7 +817,7 @@ void intel_crt_init(struct drm_device *dev)
789 <para> 817 <para>
790 The DRM core provides some suspend/resume code, but drivers 818 The DRM core provides some suspend/resume code, but drivers
791 wanting full suspend/resume support should provide save() and 819 wanting full suspend/resume support should provide save() and
792 restore() functions. These will be called at suspend, 820 restore() functions. These are called at suspend,
793 hibernate, or resume time, and should perform any state save or 821 hibernate, or resume time, and should perform any state save or
794 restore required by your device across suspend or hibernate 822 restore required by your device across suspend or hibernate
795 states. 823 states.
@@ -812,8 +840,8 @@ void intel_crt_init(struct drm_device *dev)
812 <para> 840 <para>
813 The DRM core exports several interfaces to applications, 841 The DRM core exports several interfaces to applications,
814 generally intended to be used through corresponding libdrm 842 generally intended to be used through corresponding libdrm
815 wrapper functions. In addition, drivers export device specific 843 wrapper functions. In addition, drivers export device-specific
816 interfaces for use by userspace drivers &amp; device aware 844 interfaces for use by userspace drivers &amp; device-aware
817 applications through ioctls and sysfs files. 845 applications through ioctls and sysfs files.
818 </para> 846 </para>
819 <para> 847 <para>
@@ -822,8 +850,8 @@ void intel_crt_init(struct drm_device *dev)
822 management, memory management, and output management. 850 management, memory management, and output management.
823 </para> 851 </para>
824 <para> 852 <para>
825 Cover generic ioctls and sysfs layout here. Only need high 853 Cover generic ioctls and sysfs layout here. We only need high-level
826 level info, since man pages will cover the rest. 854 info, since man pages should cover the rest.
827 </para> 855 </para>
828 </chapter> 856 </chapter>
829 857
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index 54883de5d5f9..ac3d0018140c 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -521,6 +521,11 @@ Here's a description of the fields of <varname>struct uio_mem</varname>:
521 521
522<itemizedlist> 522<itemizedlist>
523<listitem><para> 523<listitem><para>
524<varname>const char *name</varname>: Optional. Set this to help identify
525the memory region, it will show up in the corresponding sysfs node.
526</para></listitem>
527
528<listitem><para>
524<varname>int memtype</varname>: Required if the mapping is used. Set this to 529<varname>int memtype</varname>: Required if the mapping is used. Set this to
525<varname>UIO_MEM_PHYS</varname> if you you have physical memory on your 530<varname>UIO_MEM_PHYS</varname> if you you have physical memory on your
526card to be mapped. Use <varname>UIO_MEM_LOGICAL</varname> for logical 531card to be mapped. Use <varname>UIO_MEM_LOGICAL</varname> for logical
@@ -553,7 +558,7 @@ instead to remember such an address.
553</itemizedlist> 558</itemizedlist>
554 559
555<para> 560<para>
556Please do not touch the <varname>kobj</varname> element of 561Please do not touch the <varname>map</varname> element of
557<varname>struct uio_mem</varname>! It is used by the UIO framework 562<varname>struct uio_mem</varname>! It is used by the UIO framework
558to set up sysfs files for this mapping. Simply leave it alone. 563to set up sysfs files for this mapping. Simply leave it alone.
559</para> 564</para>
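For illustration, a sketch of how the fields discussed above might be filled in for one physical memory region; the names, address, and size are all made up.

#include <linux/uio_driver.h>

static struct uio_info mydrv_uio_info = {
        .name    = "mydrv",
        .version = "0.1",
        .mem     = {
                [0] = {
                        .name    = "registers",    /* shows up in sysfs */
                        .memtype = UIO_MEM_PHYS,
                        .addr    = 0xfe000000,     /* hypothetical BAR address */
                        .size    = 0x1000,
                },
        },
        .irq = UIO_IRQ_NONE,
};

The structure is then handed to uio_register_device() from the driver's probe routine.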
diff --git a/Documentation/blockdev/cciss.txt b/Documentation/blockdev/cciss.txt
index 71464e09ec18..b79d0a13e7cd 100644
--- a/Documentation/blockdev/cciss.txt
+++ b/Documentation/blockdev/cciss.txt
@@ -98,14 +98,12 @@ You must enable "SCSI tape drive support for Smart Array 5xxx" and
98"SCSI support" in your kernel configuration to be able to use SCSI 98"SCSI support" in your kernel configuration to be able to use SCSI
99tape drives with your Smart Array 5xxx controller. 99tape drives with your Smart Array 5xxx controller.
100 100
101Additionally, note that the driver will not engage the SCSI core at init 101Additionally, note that the driver will engage the SCSI core at init
102time. The driver must be directed to dynamically engage the SCSI core via 102time if any tape drives or medium changers are detected. The driver may
103the /proc filesystem entry which the "block" side of the driver creates as 103also be directed to dynamically engage the SCSI core via the /proc filesystem
104/proc/driver/cciss/cciss* at runtime. This is because at driver init time, 104entry which the "block" side of the driver creates as
105the SCSI core may not yet be initialized (because the driver is a block 105/proc/driver/cciss/cciss* at runtime. This is best done via a script.
106driver) and attempting to register it with the SCSI core in such a case 106
107would cause a hang. This is best done via an initialization script
108(typically in /etc/init.d, but could vary depending on distribution).
109For example: 107For example:
110 108
111 for x in /proc/driver/cciss/cciss[0-9]* 109 for x in /proc/driver/cciss/cciss[0-9]*
diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt
index c21d77742a07..7e62de1e59ff 100644
--- a/Documentation/cgroups/freezer-subsystem.txt
+++ b/Documentation/cgroups/freezer-subsystem.txt
@@ -33,9 +33,9 @@ demonstrate this problem using nested bash shells:
33 33
34 From a second, unrelated bash shell: 34 From a second, unrelated bash shell:
35 $ kill -SIGSTOP 16690 35 $ kill -SIGSTOP 16690
36 $ kill -SIGCONT 16990 36 $ kill -SIGCONT 16690
37 37
38 <at this point 16990 exits and causes 16644 to exit too> 38 <at this point 16690 exits and causes 16644 to exit too>
39 39
40This happens because bash can observe both signals and choose how it 40This happens because bash can observe both signals and choose how it
41responds to them. 41responds to them.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index e8552782b440..874921e97802 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -33,6 +33,7 @@ qcom Qualcomm, Inc.
33ramtron Ramtron International 33ramtron Ramtron International
34samsung Samsung Semiconductor 34samsung Samsung Semiconductor
35schindler Schindler 35schindler Schindler
36sil Silicon Image
36simtek 37simtek
37sirf SiRF Technology, Inc. 38sirf SiRF Technology, Inc.
38stericsson ST-Ericsson 39stericsson ST-Ericsson
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
index 64087c34327f..7671352216f1 100644
--- a/Documentation/filesystems/btrfs.txt
+++ b/Documentation/filesystems/btrfs.txt
@@ -63,8 +63,8 @@ IRC network.
63Userspace tools for creating and manipulating Btrfs file systems are 63Userspace tools for creating and manipulating Btrfs file systems are
64available from the git repository at the following location: 64available from the git repository at the following location:
65 65
66 http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs-unstable.git 66 http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs.git
67 git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs-unstable.git 67 git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs.git
68 68
69These include the following tools: 69These include the following tools:
70 70
diff --git a/Documentation/i2c/ten-bit-addresses b/Documentation/i2c/ten-bit-addresses
index e9890709c508..cdfe13901b99 100644
--- a/Documentation/i2c/ten-bit-addresses
+++ b/Documentation/i2c/ten-bit-addresses
@@ -1,22 +1,24 @@
1The I2C protocol knows about two kinds of device addresses: normal 7 bit 1The I2C protocol knows about two kinds of device addresses: normal 7 bit
2addresses, and an extended set of 10 bit addresses. The sets of addresses 2addresses, and an extended set of 10 bit addresses. The sets of addresses
3do not intersect: the 7 bit address 0x10 is not the same as the 10 bit 3do not intersect: the 7 bit address 0x10 is not the same as the 10 bit
4address 0x10 (though a single device could respond to both of them). You 4address 0x10 (though a single device could respond to both of them).
5select a 10 bit address by adding an extra byte after the address
6byte:
7 S Addr7 Rd/Wr ....
8becomes
9 S 11110 Addr10 Rd/Wr
10S is the start bit, Rd/Wr the read/write bit, and if you count the number
11of bits, you will see the there are 8 after the S bit for 7 bit addresses,
12and 16 after the S bit for 10 bit addresses.
13 5
14WARNING! The current 10 bit address support is EXPERIMENTAL. There are 6I2C messages to and from 10-bit address devices have a different format.
15several places in the code that will cause SEVERE PROBLEMS with 10 bit 7See the I2C specification for the details.
16addresses, even though there is some basic handling and hooks. Also,
17almost no supported adapter handles the 10 bit addresses correctly.
18 8
19As soon as a real 10 bit address device is spotted 'in the wild', we 9The current 10 bit address support is minimal. It should work, however
20can and will add proper support. Right now, 10 bit address devices 10you can expect some problems along the way:
21are defined by the I2C protocol, but we have never seen a single device 11* Not all bus drivers support 10-bit addresses. Some don't because the
22which supports them. 12 hardware doesn't support them (SMBus doesn't require 10-bit address
13 support for example), some don't because nobody bothered adding the
14 code (or it's there but not working properly.) Software implementation
15 (i2c-algo-bit) is known to work.
16* Some optional features do not support 10-bit addresses. This is the
17 case of automatic detection and instantiation of devices by their,
18 drivers, for example.
19* Many user-space packages (for example i2c-tools) lack support for
20 10-bit addresses.
21
22Note that 10-bit address devices are still pretty rare, so the limitations
23listed above could stay for a long time, maybe even forever if nobody
24needs them to be fixed.
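Here is a sketch of how a 10-bit slave can be declared from kernel code, assuming the adapter driver supports 10-bit addressing; the device name and address are hypothetical.

#include <linux/i2c.h>

static struct i2c_board_info mydev_info = {
        I2C_BOARD_INFO("mydev", 0x10),
        .flags = I2C_CLIENT_TEN,        /* mark 0x10 as a 10-bit address */
};

/* Instantiate the device on a bus whose driver handles 10-bit addressing. */
struct i2c_client *mydev_add(struct i2c_adapter *adap)
{
        return i2c_new_device(adap, &mydev_info);
}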
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a0c5c5f4fce6..81c287fad79d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -315,12 +315,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
315 CPU-intensive style benchmark, and it can vary highly in 315 CPU-intensive style benchmark, and it can vary highly in
316 a microbenchmark depending on workload and compiler. 316 a microbenchmark depending on workload and compiler.
317 317
318 1: only for 32-bit processes 318 32: only for 32-bit processes
319 2: only for 64-bit processes 319 64: only for 64-bit processes
320 on: enable for both 32- and 64-bit processes 320 on: enable for both 32- and 64-bit processes
321 off: disable for both 32- and 64-bit processes 321 off: disable for both 32- and 64-bit processes
322 322
323 amd_iommu= [HW,X86-84] 323 amd_iommu= [HW,X86-64]
324 Pass parameters to the AMD IOMMU driver in the system. 324 Pass parameters to the AMD IOMMU driver in the system.
325 Possible values are: 325 Possible values are:
326 fullflush - enable flushing of IO/TLB entries when 326 fullflush - enable flushing of IO/TLB entries when
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index cb7f3148035d..589f2da5d545 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -20,7 +20,7 @@ ip_no_pmtu_disc - BOOLEAN
20 default FALSE 20 default FALSE
21 21
22min_pmtu - INTEGER 22min_pmtu - INTEGER
23 default 562 - minimum discovered Path MTU 23 default 552 - minimum discovered Path MTU
24 24
25route/max_size - INTEGER 25route/max_size - INTEGER
26 Maximum number of routes allowed in the kernel. Increase 26 Maximum number of routes allowed in the kernel. Increase
@@ -282,11 +282,11 @@ tcp_max_ssthresh - INTEGER
282 Default: 0 (off) 282 Default: 0 (off)
283 283
284tcp_max_syn_backlog - INTEGER 284tcp_max_syn_backlog - INTEGER
285 Maximal number of remembered connection requests, which are 285 Maximal number of remembered connection requests, which have not
286 still did not receive an acknowledgment from connecting client. 286 received an acknowledgment from connecting client.
287 Default value is 1024 for systems with more than 128Mb of memory, 287 The minimal value is 128 for low memory machines, and it will
288 and 128 for low memory machines. If server suffers of overload, 288 increase in proportion to the memory of machine.
289 try to increase this number. 289 If server suffers from overload, try increasing this number.
290 290
291tcp_max_tw_buckets - INTEGER 291tcp_max_tw_buckets - INTEGER
292 Maximal number of timewait sockets held by system simultaneously. 292 Maximal number of timewait sockets held by system simultaneously.
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 646a89e0c07d..3139fb505dce 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -123,9 +123,10 @@ please refer directly to the source code for more information about it.
123Subsystem-Level Methods 123Subsystem-Level Methods
124----------------------- 124-----------------------
125The core methods to suspend and resume devices reside in struct dev_pm_ops 125The core methods to suspend and resume devices reside in struct dev_pm_ops
126pointed to by the pm member of struct bus_type, struct device_type and 126pointed to by the ops member of struct dev_pm_domain, or by the pm member of
127struct class. They are mostly of interest to the people writing infrastructure 127struct bus_type, struct device_type and struct class. They are mostly of
128for buses, like PCI or USB, or device type and device class drivers. 128interest to the people writing infrastructure for platforms and buses, like PCI
129or USB, or device type and device class drivers.
129 130
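As a sketch of the struct dev_pm_ops the text refers to, as a bus or class might provide it; the callbacks are empty placeholders.

#include <linux/device.h>
#include <linux/pm.h>

static int mybus_suspend(struct device *dev)
{
        /* Quiesce the device before the system enters a sleep state. */
        return 0;
}

static int mybus_resume(struct device *dev)
{
        /* Bring the device back up after the system leaves a sleep state. */
        return 0;
}

static const struct dev_pm_ops mybus_pm_ops = {
        .suspend = mybus_suspend,
        .resume  = mybus_resume,
};

A struct bus_type (or device type, class, or PM domain) would then point its pm member at mybus_pm_ops.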
130Bus drivers implement these methods as appropriate for the hardware and the 131Bus drivers implement these methods as appropriate for the hardware and the
131drivers using it; PCI works differently from USB, and so on. Not many people 132drivers using it; PCI works differently from USB, and so on. Not many people
@@ -139,41 +140,57 @@ sequencing in the driver model tree.
139 140
140/sys/devices/.../power/wakeup files 141/sys/devices/.../power/wakeup files
141----------------------------------- 142-----------------------------------
142All devices in the driver model have two flags to control handling of wakeup 143All device objects in the driver model contain fields that control the handling
143events (hardware signals that can force the device and/or system out of a low 144of system wakeup events (hardware signals that can force the system out of a
144power state). These flags are initialized by bus or device driver code using 145sleep state). These fields are initialized by bus or device driver code using
145device_set_wakeup_capable() and device_set_wakeup_enable(), defined in 146device_set_wakeup_capable() and device_set_wakeup_enable(), defined in
146include/linux/pm_wakeup.h. 147include/linux/pm_wakeup.h.
147 148
148The "can_wakeup" flag just records whether the device (and its driver) can 149The "power.can_wakeup" flag just records whether the device (and its driver) can
149physically support wakeup events. The device_set_wakeup_capable() routine 150physically support wakeup events. The device_set_wakeup_capable() routine
150affects this flag. The "should_wakeup" flag controls whether the device should 151affects this flag. The "power.wakeup" field is a pointer to an object of type
151try to use its wakeup mechanism. device_set_wakeup_enable() affects this flag; 152struct wakeup_source used for controlling whether or not the device should use
152for the most part drivers should not change its value. The initial value of 153its system wakeup mechanism and for notifying the PM core of system wakeup
153should_wakeup is supposed to be false for the majority of devices; the major 154events signaled by the device. This object is only present for wakeup-capable
154exceptions are power buttons, keyboards, and Ethernet adapters whose WoL 155devices (i.e. devices whose "can_wakeup" flags are set) and is created (or
155(wake-on-LAN) feature has been set up with ethtool. It should also default 156removed) by device_set_wakeup_capable().
156to true for devices that don't generate wakeup requests on their own but merely
157forward wakeup requests from one bus to another (like PCI bridges).
158 157
159Whether or not a device is capable of issuing wakeup events is a hardware 158Whether or not a device is capable of issuing wakeup events is a hardware
160matter, and the kernel is responsible for keeping track of it. By contrast, 159matter, and the kernel is responsible for keeping track of it. By contrast,
161whether or not a wakeup-capable device should issue wakeup events is a policy 160whether or not a wakeup-capable device should issue wakeup events is a policy
162decision, and it is managed by user space through a sysfs attribute: the 161decision, and it is managed by user space through a sysfs attribute: the
163power/wakeup file. User space can write the strings "enabled" or "disabled" to 162"power/wakeup" file. User space can write the strings "enabled" or "disabled"
164set or clear the "should_wakeup" flag, respectively. This file is only present 163to it to indicate whether or not, respectively, the device is supposed to signal
165for wakeup-capable devices (i.e. devices whose "can_wakeup" flags are set) 164system wakeup. This file is only present if the "power.wakeup" object exists
166and is created (or removed) by device_set_wakeup_capable(). Reads from the 165for the given device and is created (or removed) along with that object, by
167file will return the corresponding string. 166device_set_wakeup_capable(). Reads from the file will return the corresponding
168 167string.
169The device_may_wakeup() routine returns true only if both flags are set. 168
169The "power/wakeup" file is supposed to contain the "disabled" string initially
170for the majority of devices; the major exceptions are power buttons, keyboards,
171and Ethernet adapters whose WoL (wake-on-LAN) feature has been set up with
172ethtool. It should also default to "enabled" for devices that don't generate
173wakeup requests on their own but merely forward wakeup requests from one bus to
174another (like PCI Express ports).
175
176The device_may_wakeup() routine returns true only if the "power.wakeup" object
177exists and the corresponding "power/wakeup" file contains the string "enabled".
170This information is used by subsystems, like the PCI bus type code, to see 178This information is used by subsystems, like the PCI bus type code, to see
171whether or not to enable the devices' wakeup mechanisms. If device wakeup 179whether or not to enable the devices' wakeup mechanisms. If device wakeup
172mechanisms are enabled or disabled directly by drivers, they also should use 180mechanisms are enabled or disabled directly by drivers, they also should use
173device_may_wakeup() to decide what to do during a system sleep transition. 181device_may_wakeup() to decide what to do during a system sleep transition.
174However for runtime power management, wakeup events should be enabled whenever 182Device drivers, however, are not supposed to call device_set_wakeup_enable()
175the device and driver both support them, regardless of the should_wakeup flag. 183directly in any case.
176 184
185It ought to be noted that system wakeup is conceptually different from "remote
186wakeup" used by runtime power management, although it may be supported by the
187same physical mechanism. Remote wakeup is a feature allowing devices in
188low-power states to trigger specific interrupts to signal conditions in which
189they should be put into the full-power state. Those interrupts may or may not
190be used to signal system wakeup events, depending on the hardware design. On
191some systems it is impossible to trigger them from system sleep states. In any
192case, remote wakeup should always be enabled for runtime power management for
193all devices and drivers that support it.
177 194
178/sys/devices/.../power/control files 195/sys/devices/.../power/control files
179------------------------------------ 196------------------------------------
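To make the wakeup interface described in the hunk above concrete, a minimal driver-side sketch might look as follows. The foo_* names and the hardware helpers are placeholders, not part of any real driver; only device_set_wakeup_capable() and device_may_wakeup() are the documented APIs:

	#include <linux/device.h>
	#include <linux/pm_wakeup.h>

	/* Hypothetical hardware helpers, stubbed out for the sketch. */
	static void foo_hw_arm_wakeup(struct device *dev) { /* program wake IRQ */ }
	static void foo_hw_disable_wakeup(struct device *dev) { }

	static int foo_probe(struct device *dev)
	{
		/* Record that the hardware can signal system wakeup; this also
		 * creates the power.wakeup object and the power/wakeup file. */
		device_set_wakeup_capable(dev, true);
		return 0;
	}

	static int foo_suspend(struct device *dev)
	{
		/* Honour the user-space policy exposed through power/wakeup. */
		if (device_may_wakeup(dev))
			foo_hw_arm_wakeup(dev);
		else
			foo_hw_disable_wakeup(dev);
		return 0;
	}
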
@@ -249,20 +266,31 @@ for every device before the next phase begins. Not all busses or classes
249support all these callbacks and not all drivers use all the callbacks. The 266support all these callbacks and not all drivers use all the callbacks. The
250various phases always run after tasks have been frozen and before they are 267various phases always run after tasks have been frozen and before they are
251unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have 268unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have
252been disabled (except for those marked with the IRQ_WAKEUP flag). 269been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
253 270
254All phases use bus, type, or class callbacks (that is, methods defined in 271All phases use PM domain, bus, type, or class callbacks (that is, methods
255dev->bus->pm, dev->type->pm, or dev->class->pm). These callbacks are mutually 272defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
256exclusive, so if the device type provides a struct dev_pm_ops object pointed to 273These callbacks are regarded by the PM core as mutually exclusive. Moreover,
257by its pm field (i.e. both dev->type and dev->type->pm are defined), the 274PM domain callbacks always take precedence over bus, type and class callbacks,
258callbacks included in that object (i.e. dev->type->pm) will be used. Otherwise, 275while type callbacks take precedence over bus and class callbacks, and class
259if the class provides a struct dev_pm_ops object pointed to by its pm field 276callbacks take precedence over bus callbacks. To be precise, the following
260(i.e. both dev->class and dev->class->pm are defined), the PM core will use the 277rules are used to determine which callback to execute in the given phase:
261callbacks from that object (i.e. dev->class->pm). Finally, if the pm fields of 278
262both the device type and class objects are NULL (or those objects do not exist), 279 1. If dev->pm_domain is present, the PM core will attempt to execute the
263the callbacks provided by the bus (that is, the callbacks from dev->bus->pm) 280 callback included in dev->pm_domain->ops. If that callback is not
264will be used (this allows device types to override callbacks provided by bus 281 present, no action will be carried out for the given device.
265types or classes if necessary). 282
283 2. Otherwise, if both dev->type and dev->type->pm are present, the callback
284 included in dev->type->pm will be executed.
285
286 3. Otherwise, if both dev->class and dev->class->pm are present, the
287 callback included in dev->class->pm will be executed.
288
289 4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback
290 included in dev->bus->pm will be executed.
291
292This allows PM domains and device types to override callbacks provided by bus
293types or device classes if necessary.
266 294
267These callbacks may in turn invoke device- or driver-specific methods stored in 295These callbacks may in turn invoke device- or driver-specific methods stored in
268dev->driver->pm, but they don't have to. 296dev->driver->pm, but they don't have to.
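The four rules above map onto a fairly small decision function. A simplified sketch of that selection order is shown below; the real logic lives in drivers/base/power/main.c and handles additional corner cases:

	#include <linux/device.h>
	#include <linux/pm.h>

	/* Simplified: pick the struct dev_pm_ops the PM core would consult. */
	static const struct dev_pm_ops *pick_pm_ops(struct device *dev)
	{
		if (dev->pm_domain)
			return &dev->pm_domain->ops;	/* rule 1 */
		if (dev->type && dev->type->pm)
			return dev->type->pm;		/* rule 2 */
		if (dev->class && dev->class->pm)
			return dev->class->pm;		/* rule 3 */
		if (dev->bus && dev->bus->pm)
			return dev->bus->pm;		/* rule 4 */
		return NULL;	/* fall back to driver/legacy handling */
	}
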
@@ -283,9 +311,8 @@ When the system goes into the standby or memory sleep state, the phases are:
283 311
284 After the prepare callback method returns, no new children may be 312 After the prepare callback method returns, no new children may be
285 registered below the device. The method may also prepare the device or 313 registered below the device. The method may also prepare the device or
286 driver in some way for the upcoming system power transition (for 314 driver in some way for the upcoming system power transition, but it
287 example, by allocating additional memory required for this purpose), but 315 should not put the device into a low-power state.
288 it should not put the device into a low-power state.
289 316
290 2. The suspend methods should quiesce the device to stop it from performing 317 2. The suspend methods should quiesce the device to stop it from performing
291 I/O. They also may save the device registers and put it into the 318 I/O. They also may save the device registers and put it into the
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 5336149f831b..c2ae8bf77d46 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -44,25 +44,33 @@ struct dev_pm_ops {
44}; 44};
45 45
46The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks 46The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks
47are executed by the PM core for either the power domain, or the device type 47are executed by the PM core for the device's subsystem that may be either of
48(if the device power domain's struct dev_pm_ops does not exist), or the class 48the following:
49(if the device power domain's and type's struct dev_pm_ops object does not 49
50exist), or the bus type (if the device power domain's, type's and class' 50 1. PM domain of the device, if the device's PM domain object, dev->pm_domain,
51struct dev_pm_ops objects do not exist) of the given device, so the priority 51 is present.
52order of callbacks from high to low is that power domain callbacks, device 52
53type callbacks, class callbacks and bus type callbacks, and the high priority 53 2. Device type of the device, if both dev->type and dev->type->pm are present.
54one will take precedence over low priority one. The bus type, device type and 54
55class callbacks are referred to as subsystem-level callbacks in what follows, 55 3. Device class of the device, if both dev->class and dev->class->pm are
56and generally speaking, the power domain callbacks are used for representing 56 present.
57power domains within a SoC. 57
58 4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
59
60The PM core always checks which callback to use in the order given above, so the
61priority order of callbacks from high to low is: PM domain, device type, class
62and bus type. Moreover, the high-priority one will always take precedence over
63a low-priority one. The PM domain, bus type, device type and class callbacks
64are referred to as subsystem-level callbacks in what follows.
58 65
59By default, the callbacks are always invoked in process context with interrupts 66By default, the callbacks are always invoked in process context with interrupts
60enabled. However, subsystems can use the pm_runtime_irq_safe() helper function 67enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
61to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume() 68to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
62callbacks should be invoked in atomic context with interrupts disabled. 69->runtime_idle() callbacks may be invoked in atomic context with interrupts
63This implies that these callback routines must not block or sleep, but it also 70disabled for a given device. This implies that the callback routines in
64means that the synchronous helper functions listed at the end of Section 4 can 71question must not block or sleep, but it also means that the synchronous helper
65be used within an interrupt handler or in an atomic context. 72functions listed at the end of Section 4 may be used for that device within an
73interrupt handler or generally in an atomic context.
66 74
67The subsystem-level suspend callback is _entirely_ _responsible_ for handling 75The subsystem-level suspend callback is _entirely_ _responsible_ for handling
68the suspend of the device as appropriate, which may, but need not include 76the suspend of the device as appropriate, which may, but need not include
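As a rough illustration of the callback wiring and the pm_runtime_irq_safe() usage discussed above, a driver might do something like the following; the foo_* names are placeholders and error handling is omitted:

	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static int foo_runtime_suspend(struct device *dev)
	{
		/* Must not sleep once pm_runtime_irq_safe() has been called. */
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	};

	static int foo_probe(struct device *dev)
	{
		pm_runtime_irq_safe(dev);  /* callbacks may run in atomic context */
		pm_runtime_enable(dev);
		return 0;
	}
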
diff --git a/Documentation/serial/serial-rs485.txt b/Documentation/serial/serial-rs485.txt
index 079cb3df62cf..41c8378c0b2f 100644
--- a/Documentation/serial/serial-rs485.txt
+++ b/Documentation/serial/serial-rs485.txt
@@ -97,15 +97,23 @@
97 97
98 struct serial_rs485 rs485conf; 98 struct serial_rs485 rs485conf;
99 99
100 /* Set RS485 mode: */ 100 /* Enable RS485 mode: */
101 rs485conf.flags |= SER_RS485_ENABLED; 101 rs485conf.flags |= SER_RS485_ENABLED;
102 102
103 /* Set logical level for RTS pin equal to 1 when sending: */
104 rs485conf.flags |= SER_RS485_RTS_ON_SEND;
105 /* or, set logical level for RTS pin equal to 0 when sending: */
106 rs485conf.flags &= ~(SER_RS485_RTS_ON_SEND);
107
108 /* Set logical level for RTS pin equal to 1 after sending: */
109 rs485conf.flags |= SER_RS485_RTS_AFTER_SEND;
110 /* or, set logical level for RTS pin equal to 0 after sending: */
111 rs485conf.flags &= ~(SER_RS485_RTS_AFTER_SEND);
112
103 /* Set rts delay before send, if needed: */ 113 /* Set rts delay before send, if needed: */
104 rs485conf.flags |= SER_RS485_RTS_BEFORE_SEND;
105 rs485conf.delay_rts_before_send = ...; 114 rs485conf.delay_rts_before_send = ...;
106 115
107 /* Set rts delay after send, if needed: */ 116 /* Set rts delay after send, if needed: */
108 rs485conf.flags |= SER_RS485_RTS_AFTER_SEND;
109 rs485conf.delay_rts_after_send = ...; 117 rs485conf.delay_rts_after_send = ...;
110 118
111 /* Set this flag if you want to receive data even whilst sending data */ 119 /* Set this flag if you want to receive data even whilst sending data */
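Putting the fragments above together, a complete user-space example of configuring RS485 mode via the TIOCSRS485 ioctl could look like this; /dev/ttyS1 and the 1 ms delays are only example values:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/serial.h>

	int main(void)
	{
		struct serial_rs485 rs485conf;
		int fd = open("/dev/ttyS1", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		memset(&rs485conf, 0, sizeof(rs485conf));
		rs485conf.flags |= SER_RS485_ENABLED;		/* enable RS485 mode */
		rs485conf.flags |= SER_RS485_RTS_ON_SEND;	/* RTS logical 1 when sending */
		rs485conf.flags &= ~SER_RS485_RTS_AFTER_SEND;	/* RTS logical 0 after sending */
		rs485conf.delay_rts_before_send = 1;		/* milliseconds */
		rs485conf.delay_rts_after_send = 1;

		if (ioctl(fd, TIOCSRS485, &rs485conf) < 0)
			perror("TIOCSRS485");

		/* ... read()/write() on fd as usual ... */
		return 0;
	}
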
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 4f3443230d89..edad99abec21 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -349,6 +349,7 @@ STAC92HD83*
349 ref Reference board 349 ref Reference board
350 mic-ref Reference board with power management for ports 350 mic-ref Reference board with power management for ports
351 dell-s14 Dell laptop 351 dell-s14 Dell laptop
352 dell-vostro-3500 Dell Vostro 3500 laptop
352 hp HP laptops with (inverted) mute-LED 353 hp HP laptops with (inverted) mute-LED
353 hp-dv7-4000 HP dv-7 4000 354 hp-dv7-4000 HP dv-7 4000
354 auto BIOS setup (default) 355 auto BIOS setup (default)
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index 03e2771ddeef..91fee3b45fb8 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -579,7 +579,7 @@ Development Tree
579~~~~~~~~~~~~~~~~ 579~~~~~~~~~~~~~~~~
580The latest development codes for HD-audio are found on sound git tree: 580The latest development codes for HD-audio are found on sound git tree:
581 581
582- git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6.git 582- git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
583 583
584The master branch or for-next branches can be used as the main 584The master branch or for-next branches can be used as the main
585development branches in general while the HD-audio specific patches 585development branches in general while the HD-audio specific patches
@@ -594,7 +594,7 @@ is, installed via the usual spells: configure, make and make
594install(-modules). See INSTALL in the package. The snapshot tarballs 594install(-modules). See INSTALL in the package. The snapshot tarballs
595are found at: 595are found at:
596 596
597- ftp://ftp.kernel.org/pub/linux/kernel/people/tiwai/snapshot/ 597- ftp://ftp.suse.com/pub/people/tiwai/snapshot/
598 598
599 599
600Sending a Bug Report 600Sending a Bug Report
@@ -696,7 +696,7 @@ via hda-verb won't change the mixer value.
696 696
697The hda-verb program is found in the ftp directory: 697The hda-verb program is found in the ftp directory:
698 698
699- ftp://ftp.kernel.org/pub/linux/kernel/people/tiwai/misc/ 699- ftp://ftp.suse.com/pub/people/tiwai/misc/
700 700
701Also a git repository is available: 701Also a git repository is available:
702 702
@@ -764,7 +764,7 @@ operation, the jack plugging simulation, etc.
764 764
765The package is found in: 765The package is found in:
766 766
767- ftp://ftp.kernel.org/pub/linux/kernel/people/tiwai/misc/ 767- ftp://ftp.suse.com/pub/people/tiwai/misc/
768 768
769A git repository is available: 769A git repository is available:
770 770
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt
index 3e2ec9cbf397..d50c14df3411 100644
--- a/Documentation/sound/alsa/soc/machine.txt
+++ b/Documentation/sound/alsa/soc/machine.txt
@@ -50,8 +50,7 @@ Machine DAI Configuration
50The machine DAI configuration glues all the codec and CPU DAIs together. It can 50The machine DAI configuration glues all the codec and CPU DAIs together. It can
51also be used to set up the DAI system clock and for any machine related DAI 51also be used to set up the DAI system clock and for any machine related DAI
52initialisation e.g. the machine audio map can be connected to the codec audio 52initialisation e.g. the machine audio map can be connected to the codec audio
53map, unconnected codec pins can be set as such. Please see corgi.c, spitz.c 53map, unconnected codec pins can be set as such.
54for examples.
55 54
56struct snd_soc_dai_link is used to set up each DAI in your machine. e.g. 55struct snd_soc_dai_link is used to set up each DAI in your machine. e.g.
57 56
@@ -83,8 +82,7 @@ Machine Power Map
83The machine driver can optionally extend the codec power map and to become an 82The machine driver can optionally extend the codec power map and to become an
84audio power map of the audio subsystem. This allows for automatic power up/down 83audio power map of the audio subsystem. This allows for automatic power up/down
85of speaker/HP amplifiers, etc. Codec pins can be connected to the machines jack 84of speaker/HP amplifiers, etc. Codec pins can be connected to the machines jack
86sockets in the machine init function. See soc/pxa/spitz.c and dapm.txt for 85sockets in the machine init function.
87details.
88 86
89 87
90Machine Controls 88Machine Controls
diff --git a/Documentation/usb/linux-cdc-acm.inf b/Documentation/usb/linux-cdc-acm.inf
index 37a02ce54841..f0ffc27d4c0a 100644
--- a/Documentation/usb/linux-cdc-acm.inf
+++ b/Documentation/usb/linux-cdc-acm.inf
@@ -90,10 +90,10 @@ ServiceBinary=%12%\USBSER.sys
90[SourceDisksFiles] 90[SourceDisksFiles]
91[SourceDisksNames] 91[SourceDisksNames]
92[DeviceList] 92[DeviceList]
93%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02 93%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
94 94
95[DeviceList.NTamd64] 95[DeviceList.NTamd64]
96%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02 96%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
97 97
98 98
99;------------------------------------------------------------------------------ 99;------------------------------------------------------------------------------
diff --git a/Kbuild b/Kbuild
index 4caab4f6cba7..b8b708ad6dc3 100644
--- a/Kbuild
+++ b/Kbuild
@@ -92,7 +92,7 @@ always += missing-syscalls
92targets += missing-syscalls 92targets += missing-syscalls
93 93
94quiet_cmd_syscalls = CALL $< 94quiet_cmd_syscalls = CALL $<
95 cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) 95 cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags)
96 96
97missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE 97missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
98 $(call cmd,syscalls) 98 $(call cmd,syscalls)
diff --git a/MAINTAINERS b/MAINTAINERS
index 4808256446f2..b9db108f01c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -511,8 +511,8 @@ M: Joerg Roedel <joerg.roedel@amd.com>
511L: iommu@lists.linux-foundation.org 511L: iommu@lists.linux-foundation.org
512T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git 512T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
513S: Supported 513S: Supported
514F: arch/x86/kernel/amd_iommu*.c 514F: drivers/iommu/amd_iommu*.[ch]
515F: arch/x86/include/asm/amd_iommu*.h 515F: include/linux/amd-iommu.h
516 516
517AMD MICROCODE UPDATE SUPPORT 517AMD MICROCODE UPDATE SUPPORT
518M: Andreas Herrmann <andreas.herrmann3@amd.com> 518M: Andreas Herrmann <andreas.herrmann3@amd.com>
@@ -789,6 +789,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
789S: Maintained 789S: Maintained
790T: git git://git.pengutronix.de/git/imx/linux-2.6.git 790T: git git://git.pengutronix.de/git/imx/linux-2.6.git
791F: arch/arm/mach-mx*/ 791F: arch/arm/mach-mx*/
792F: arch/arm/mach-imx/
792F: arch/arm/plat-mxc/ 793F: arch/arm/plat-mxc/
793 794
794ARM/FREESCALE IMX51 795ARM/FREESCALE IMX51
@@ -804,6 +805,13 @@ S: Maintained
804T: git git://git.linaro.org/people/shawnguo/linux-2.6.git 805T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
805F: arch/arm/mach-imx/*imx6* 806F: arch/arm/mach-imx/*imx6*
806 807
808ARM/FREESCALE MXS ARM ARCHITECTURE
809M: Shawn Guo <shawn.guo@linaro.org>
810L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
811S: Maintained
812T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
813F: arch/arm/mach-mxs/
814
807ARM/GLOMATION GESBC9312SX MACHINE SUPPORT 815ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
808M: Lennert Buytenhek <kernel@wantstofly.org> 816M: Lennert Buytenhek <kernel@wantstofly.org>
809L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 817L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1046,35 +1054,18 @@ ARM/SAMSUNG ARM ARCHITECTURES
1046M: Ben Dooks <ben-linux@fluff.org> 1054M: Ben Dooks <ben-linux@fluff.org>
1047M: Kukjin Kim <kgene.kim@samsung.com> 1055M: Kukjin Kim <kgene.kim@samsung.com>
1048L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1056L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1057L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
1049W: http://www.fluff.org/ben/linux/ 1058W: http://www.fluff.org/ben/linux/
1050S: Maintained 1059S: Maintained
1051F: arch/arm/plat-samsung/ 1060F: arch/arm/plat-samsung/
1052F: arch/arm/plat-s3c24xx/ 1061F: arch/arm/plat-s3c24xx/
1053F: arch/arm/plat-s5p/ 1062F: arch/arm/plat-s5p/
1063F: arch/arm/mach-s3c24*/
1064F: arch/arm/mach-s3c64xx/
1054F: drivers/*/*s3c2410* 1065F: drivers/*/*s3c2410*
1055F: drivers/*/*/*s3c2410* 1066F: drivers/*/*/*s3c2410*
1056 1067F: drivers/spi/spi-s3c*
1057ARM/S3C2410 ARM ARCHITECTURE 1068F: sound/soc/samsung/*
1058M: Ben Dooks <ben-linux@fluff.org>
1059L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1060W: http://www.fluff.org/ben/linux/
1061S: Maintained
1062F: arch/arm/mach-s3c2410/
1063
1064ARM/S3C244x ARM ARCHITECTURE
1065M: Ben Dooks <ben-linux@fluff.org>
1066L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1067W: http://www.fluff.org/ben/linux/
1068S: Maintained
1069F: arch/arm/mach-s3c2440/
1070F: arch/arm/mach-s3c2443/
1071
1072ARM/S3C64xx ARM ARCHITECTURE
1073M: Ben Dooks <ben-linux@fluff.org>
1074L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1075W: http://www.fluff.org/ben/linux/
1076S: Maintained
1077F: arch/arm/mach-s3c64xx/
1078 1069
1079ARM/S5P EXYNOS ARM ARCHITECTURES 1070ARM/S5P EXYNOS ARM ARCHITECTURES
1080M: Kukjin Kim <kgene.kim@samsung.com> 1071M: Kukjin Kim <kgene.kim@samsung.com>
@@ -1106,6 +1097,7 @@ F: drivers/media/video/s5p-fimc/
1106ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT 1097ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
1107M: Kyungmin Park <kyungmin.park@samsung.com> 1098M: Kyungmin Park <kyungmin.park@samsung.com>
1108M: Kamil Debski <k.debski@samsung.com> 1099M: Kamil Debski <k.debski@samsung.com>
1100M: Jeongtae Park <jtp.park@samsung.com>
1109L: linux-arm-kernel@lists.infradead.org 1101L: linux-arm-kernel@lists.infradead.org
1110L: linux-media@vger.kernel.org 1102L: linux-media@vger.kernel.org
1111S: Maintained 1103S: Maintained
@@ -1788,6 +1780,14 @@ F: include/net/cfg80211.h
1788F: net/wireless/* 1780F: net/wireless/*
1789X: net/wireless/wext* 1781X: net/wireless/wext*
1790 1782
1783CHAR and MISC DRIVERS
1784M: Arnd Bergmann <arnd@arndb.de>
1785M: Greg Kroah-Hartman <greg@kroah.com>
1786T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
1787S: Maintained
1788F: drivers/char/*
1789F: drivers/misc/*
1790
1791CHECKPATCH 1791CHECKPATCH
1792M: Andy Whitcroft <apw@canonical.com> 1792M: Andy Whitcroft <apw@canonical.com>
1793S: Supported 1793S: Supported
@@ -1926,9 +1926,11 @@ S: Maintained
1926F: drivers/connector/ 1926F: drivers/connector/
1927 1927
1928CONTROL GROUPS (CGROUPS) 1928CONTROL GROUPS (CGROUPS)
1929M: Paul Menage <paul@paulmenage.org> 1929M: Tejun Heo <tj@kernel.org>
1930M: Li Zefan <lizf@cn.fujitsu.com> 1930M: Li Zefan <lizf@cn.fujitsu.com>
1931L: containers@lists.linux-foundation.org 1931L: containers@lists.linux-foundation.org
1932L: cgroups@vger.kernel.org
1933T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
1932S: Maintained 1934S: Maintained
1933F: include/linux/cgroup* 1935F: include/linux/cgroup*
1934F: kernel/cgroup* 1936F: kernel/cgroup*
@@ -2342,6 +2344,13 @@ S: Supported
2342F: drivers/gpu/drm/i915 2344F: drivers/gpu/drm/i915
2343F: include/drm/i915* 2345F: include/drm/i915*
2344 2346
2347DRM DRIVERS FOR EXYNOS
2348M: Inki Dae <inki.dae@samsung.com>
2349L: dri-devel@lists.freedesktop.org
2350S: Supported
2351F: drivers/gpu/drm/exynos
2352F: include/drm/exynos*
2353
2345DSCC4 DRIVER 2354DSCC4 DRIVER
2346M: Francois Romieu <romieu@fr.zoreil.com> 2355M: Francois Romieu <romieu@fr.zoreil.com>
2347L: netdev@vger.kernel.org 2356L: netdev@vger.kernel.org
@@ -2576,7 +2585,7 @@ S: Maintained
2576F: drivers/net/ethernet/i825xx/eexpress.* 2585F: drivers/net/ethernet/i825xx/eexpress.*
2577 2586
2578ETHERNET BRIDGE 2587ETHERNET BRIDGE
2579M: Stephen Hemminger <shemminger@linux-foundation.org> 2588M: Stephen Hemminger <shemminger@vyatta.com>
2580L: bridge@lists.linux-foundation.org 2589L: bridge@lists.linux-foundation.org
2581L: netdev@vger.kernel.org 2590L: netdev@vger.kernel.org
2582W: http://www.linuxfoundation.org/en/Net:Bridge 2591W: http://www.linuxfoundation.org/en/Net:Bridge
@@ -3710,7 +3719,7 @@ F: fs/jbd2/
3710F: include/linux/jbd2.h 3719F: include/linux/jbd2.h
3711 3720
3712JSM Neo PCI based serial card 3721JSM Neo PCI based serial card
3713M: Breno Leitao <leitao@linux.vnet.ibm.com> 3722M: Lucas Tavares <lucaskt@linux.vnet.ibm.com>
3714L: linux-serial@vger.kernel.org 3723L: linux-serial@vger.kernel.org
3715S: Maintained 3724S: Maintained
3716F: drivers/tty/serial/jsm/ 3725F: drivers/tty/serial/jsm/
@@ -4293,9 +4302,11 @@ F: include/linux/mm.h
4293F: mm/ 4302F: mm/
4294 4303
4295MEMORY RESOURCE CONTROLLER 4304MEMORY RESOURCE CONTROLLER
4305M: Johannes Weiner <hannes@cmpxchg.org>
4306M: Michal Hocko <mhocko@suse.cz>
4296M: Balbir Singh <bsingharora@gmail.com> 4307M: Balbir Singh <bsingharora@gmail.com>
4297M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
4298M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> 4308M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
4309L: cgroups@vger.kernel.org
4299L: linux-mm@kvack.org 4310L: linux-mm@kvack.org
4300S: Maintained 4311S: Maintained
4301F: mm/memcontrol.c 4312F: mm/memcontrol.c
@@ -4329,7 +4340,7 @@ MIPS
4329M: Ralf Baechle <ralf@linux-mips.org> 4340M: Ralf Baechle <ralf@linux-mips.org>
4330L: linux-mips@linux-mips.org 4341L: linux-mips@linux-mips.org
4331W: http://www.linux-mips.org/ 4342W: http://www.linux-mips.org/
4332T: git git://git.linux-mips.org/pub/scm/linux.git 4343T: git git://git.linux-mips.org/pub/scm/ralf/linux.git
4333Q: http://patchwork.linux-mips.org/project/linux-mips/list/ 4344Q: http://patchwork.linux-mips.org/project/linux-mips/list/
4334S: Supported 4345S: Supported
4335F: Documentation/mips/ 4346F: Documentation/mips/
@@ -4462,7 +4473,7 @@ S: Supported
4462F: drivers/infiniband/hw/nes/ 4473F: drivers/infiniband/hw/nes/
4463 4474
4464NETEM NETWORK EMULATOR 4475NETEM NETWORK EMULATOR
4465M: Stephen Hemminger <shemminger@linux-foundation.org> 4476M: Stephen Hemminger <shemminger@vyatta.com>
4466L: netem@lists.linux-foundation.org 4477L: netem@lists.linux-foundation.org
4467S: Maintained 4478S: Maintained
4468F: net/sched/sch_netem.c 4479F: net/sched/sch_netem.c
@@ -4939,7 +4950,7 @@ F: drivers/char/ppdev.c
4939F: include/linux/ppdev.h 4950F: include/linux/ppdev.h
4940 4951
4941PARAVIRT_OPS INTERFACE 4952PARAVIRT_OPS INTERFACE
4942M: Jeremy Fitzhardinge <jeremy@xensource.com> 4953M: Jeremy Fitzhardinge <jeremy@goop.org>
4943M: Chris Wright <chrisw@sous-sol.org> 4954M: Chris Wright <chrisw@sous-sol.org>
4944M: Alok Kataria <akataria@vmware.com> 4955M: Alok Kataria <akataria@vmware.com>
4945M: Rusty Russell <rusty@rustcorp.com.au> 4956M: Rusty Russell <rusty@rustcorp.com.au>
@@ -5648,7 +5659,6 @@ F: drivers/media/video/*7146*
5648F: include/media/*7146* 5659F: include/media/*7146*
5649 5660
5650SAMSUNG AUDIO (ASoC) DRIVERS 5661SAMSUNG AUDIO (ASoC) DRIVERS
5651M: Jassi Brar <jassisinghbrar@gmail.com>
5652M: Sangbeom Kim <sbkim73@samsung.com> 5662M: Sangbeom Kim <sbkim73@samsung.com>
5653L: alsa-devel@alsa-project.org (moderated for non-subscribers) 5663L: alsa-devel@alsa-project.org (moderated for non-subscribers)
5654S: Supported 5664S: Supported
@@ -5977,7 +5987,7 @@ S: Maintained
5977F: drivers/usb/misc/sisusbvga/ 5987F: drivers/usb/misc/sisusbvga/
5978 5988
5979SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS 5989SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
5980M: Stephen Hemminger <shemminger@linux-foundation.org> 5990M: Stephen Hemminger <shemminger@vyatta.com>
5981L: netdev@vger.kernel.org 5991L: netdev@vger.kernel.org
5982S: Maintained 5992S: Maintained
5983F: drivers/net/ethernet/marvell/sk* 5993F: drivers/net/ethernet/marvell/sk*
@@ -6122,7 +6132,7 @@ F: sound/
6122SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) 6132SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
6123M: Liam Girdwood <lrg@ti.com> 6133M: Liam Girdwood <lrg@ti.com>
6124M: Mark Brown <broonie@opensource.wolfsonmicro.com> 6134M: Mark Brown <broonie@opensource.wolfsonmicro.com>
6125T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound-2.6.git 6135T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
6126L: alsa-devel@alsa-project.org (moderated for non-subscribers) 6136L: alsa-devel@alsa-project.org (moderated for non-subscribers)
6127W: http://alsa-project.org/main/index.php/ASoC 6137W: http://alsa-project.org/main/index.php/ASoC
6128S: Supported 6138S: Supported
@@ -7391,8 +7401,8 @@ S: Maintained
7391F: arch/x86/kernel/cpu/mcheck/* 7401F: arch/x86/kernel/cpu/mcheck/*
7392 7402
7393XEN HYPERVISOR INTERFACE 7403XEN HYPERVISOR INTERFACE
7394M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
7395M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 7404M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
7405M: Jeremy Fitzhardinge <jeremy@goop.org>
7396L: xen-devel@lists.xensource.com (moderated for non-subscribers) 7406L: xen-devel@lists.xensource.com (moderated for non-subscribers)
7397L: virtualization@lists.linux-foundation.org 7407L: virtualization@lists.linux-foundation.org
7398S: Supported 7408S: Supported
@@ -7425,7 +7435,8 @@ F: drivers/xen/*swiotlb*
7425 7435
7426XFS FILESYSTEM 7436XFS FILESYSTEM
7427P: Silicon Graphics Inc 7437P: Silicon Graphics Inc
7428M: Alex Elder <aelder@sgi.com> 7438M: Ben Myers <bpm@sgi.com>
7439M: Alex Elder <elder@kernel.org>
7429M: xfs-masters@oss.sgi.com 7440M: xfs-masters@oss.sgi.com
7430L: xfs@oss.sgi.com 7441L: xfs@oss.sgi.com
7431W: http://oss.sgi.com/projects/xfs 7442W: http://oss.sgi.com/projects/xfs
diff --git a/Makefile b/Makefile
index 361e4f00e6b9..a43733df3978 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 2 2PATCHLEVEL = 2
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc6
5NAME = Saber-toothed Squirrel 5NAME = Saber-toothed Squirrel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 44789eff983f..776d76b8cb69 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -220,8 +220,9 @@ config NEED_MACH_MEMORY_H
220 be avoided when possible. 220 be avoided when possible.
221 221
222config PHYS_OFFSET 222config PHYS_OFFSET
223 hex "Physical address of main memory" 223 hex "Physical address of main memory" if MMU
224 depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H 224 depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
225 default DRAM_BASE if !MMU
225 help 226 help
226 Please provide the physical address corresponding to the 227 Please provide the physical address corresponding to the
227 location of main memory in your system. 228 location of main memory in your system.
@@ -1231,7 +1232,7 @@ config ARM_ERRATA_742231
1231 capabilities of the processor. 1232 capabilities of the processor.
1232 1233
1233config PL310_ERRATA_588369 1234config PL310_ERRATA_588369
1234 bool "Clean & Invalidate maintenance operations do not invalidate clean lines" 1235 bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
1235 depends on CACHE_L2X0 1236 depends on CACHE_L2X0
1236 help 1237 help
1237 The PL310 L2 cache controller implements three types of Clean & 1238 The PL310 L2 cache controller implements three types of Clean &
@@ -1256,7 +1257,7 @@ config ARM_ERRATA_720789
1256 entries regardless of the ASID. 1257 entries regardless of the ASID.
1257 1258
1258config PL310_ERRATA_727915 1259config PL310_ERRATA_727915
1259 bool "Background Clean & Invalidate by Way operation can cause data corruption" 1260 bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
1260 depends on CACHE_L2X0 1261 depends on CACHE_L2X0
1261 help 1262 help
1262 PL310 implements the Clean & Invalidate by Way L2 cache maintenance 1263 PL310 implements the Clean & Invalidate by Way L2 cache maintenance
@@ -1289,8 +1290,8 @@ config ARM_ERRATA_751472
1289 operation is received by a CPU before the ICIALLUIS has completed, 1290 operation is received by a CPU before the ICIALLUIS has completed,
1290 potentially leading to corrupted entries in the cache or TLB. 1291 potentially leading to corrupted entries in the cache or TLB.
1291 1292
1292config ARM_ERRATA_753970 1293config PL310_ERRATA_753970
1293 bool "ARM errata: cache sync operation may be faulty" 1294 bool "PL310 errata: cache sync operation may be faulty"
1294 depends on CACHE_PL310 1295 depends on CACHE_PL310
1295 help 1296 help
1296 This option enables the workaround for the 753970 PL310 (r3p0) erratum. 1297 This option enables the workaround for the 753970 PL310 (r3p0) erratum.
@@ -1352,6 +1353,18 @@ config ARM_ERRATA_764369
1352 relevant cache maintenance functions and sets a specific bit 1353 relevant cache maintenance functions and sets a specific bit
1353 in the diagnostic control register of the SCU. 1354 in the diagnostic control register of the SCU.
1354 1355
1356config PL310_ERRATA_769419
1357 bool "PL310 errata: no automatic Store Buffer drain"
1358 depends on CACHE_L2X0
1359 help
1360 On revisions of the PL310 prior to r3p2, the Store Buffer does
1361 not automatically drain. This can cause normal, non-cacheable
1362 writes to be retained when the memory system is idle, leading
1363 to suboptimal I/O performance for drivers using coherent DMA.
1364 This option adds a write barrier to the cpu_idle loop so that,
1365 on systems with an outer cache, the store buffer is drained
1366 explicitly.
1367
1355endmenu 1368endmenu
1356 1369
1357source "arch/arm/common/Kconfig" 1370source "arch/arm/common/Kconfig"
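For reference, the PL310_ERRATA_769419 help text above amounts to issuing a write barrier before entering idle so the outer cache's store buffer is drained. A rough sketch of the idea only, not the actual kernel change:

	/* Schematic: drain store buffers, then idle (ARM kernel context). */
	static inline void idle_with_store_buffer_drain(void)
	{
		wmb();		/* drain write/store buffers via the outer cache sync */
		cpu_do_idle();	/* enter the low-power idle state */
	}
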
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 176062ac7f07..5df26a9976a2 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -65,6 +65,8 @@ $(obj)/%.dtb: $(src)/dts/%.dts
65 65
66$(obj)/dtbs: $(addprefix $(obj)/, $(dtb-y)) 66$(obj)/dtbs: $(addprefix $(obj)/, $(dtb-y))
67 67
68clean-files := *.dtb
69
68quiet_cmd_uimage = UIMAGE $@ 70quiet_cmd_uimage = UIMAGE $@
69 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A arm -O linux -T kernel \ 71 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A arm -O linux -T kernel \
70 -C none -a $(LOADADDR) -e $(STARTADDR) \ 72 -C none -a $(LOADADDR) -e $(STARTADDR) \
diff --git a/arch/arm/boot/dts/tegra-ventana.dts b/arch/arm/boot/dts/tegra-ventana.dts
index 9b29a623aaf1..3f9abd6b6964 100644
--- a/arch/arm/boot/dts/tegra-ventana.dts
+++ b/arch/arm/boot/dts/tegra-ventana.dts
@@ -22,11 +22,10 @@
22 sdhci@c8000400 { 22 sdhci@c8000400 {
23 cd-gpios = <&gpio 69 0>; /* gpio PI5 */ 23 cd-gpios = <&gpio 69 0>; /* gpio PI5 */
24 wp-gpios = <&gpio 57 0>; /* gpio PH1 */ 24 wp-gpios = <&gpio 57 0>; /* gpio PH1 */
25 power-gpios = <&gpio 155 0>; /* gpio PT3 */ 25 power-gpios = <&gpio 70 0>; /* gpio PI6 */
26 }; 26 };
27 27
28 sdhci@c8000600 { 28 sdhci@c8000600 {
29 power-gpios = <&gpio 70 0>; /* gpio PI6 */
30 support-8bit; 29 support-8bit;
31 }; 30 };
32}; 31};
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 0e6ae470c94f..410a546060a2 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -526,7 +526,8 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
526 sizeof(u32)); 526 sizeof(u32));
527 BUG_ON(!gic->saved_ppi_conf); 527 BUG_ON(!gic->saved_ppi_conf);
528 528
529 cpu_pm_register_notifier(&gic_notifier_block); 529 if (gic == &gic_data[0])
530 cpu_pm_register_notifier(&gic_notifier_block);
530} 531}
531#else 532#else
532static void __init gic_pm_init(struct gic_chip_data *gic) 533static void __init gic_pm_init(struct gic_chip_data *gic)
@@ -581,13 +582,16 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
581 * For primary GICs, skip over SGIs. 582 * For primary GICs, skip over SGIs.
582 * For secondary GICs, skip over PPIs, too. 583 * For secondary GICs, skip over PPIs, too.
583 */ 584 */
585 domain->hwirq_base = 32;
584 if (gic_nr == 0) { 586 if (gic_nr == 0) {
585 gic_cpu_base_addr = cpu_base; 587 gic_cpu_base_addr = cpu_base;
586 domain->hwirq_base = 16; 588
587 if (irq_start > 0) 589 if ((irq_start & 31) > 0) {
588 irq_start = (irq_start & ~31) + 16; 590 domain->hwirq_base = 16;
589 } else 591 if (irq_start != -1)
590 domain->hwirq_base = 32; 592 irq_start = (irq_start & ~31) + 16;
593 }
594 }
591 595
592 /* 596 /*
593 * Find out how many interrupts are supported. 597 * Find out how many interrupts are supported.
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index 7129cfbdacd6..f407a6b35d3d 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -1211,8 +1211,8 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
1211 ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT); 1211 ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
1212 ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT); 1212 ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
1213 1213
1214 ccr |= (rqc->dcctl << CC_SRCCCTRL_SHFT); 1214 ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
1215 ccr |= (rqc->scctl << CC_DSTCCTRL_SHFT); 1215 ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
1216 1216
1217 ccr |= (rqc->swap << CC_SWAP_SHFT); 1217 ccr |= (rqc->swap << CC_SWAP_SHFT);
1218 1218
@@ -1623,6 +1623,11 @@ static inline int _alloc_event(struct pl330_thread *thrd)
1623 return -1; 1623 return -1;
1624} 1624}
1625 1625
1626static bool _chan_ns(const struct pl330_info *pi, int i)
1627{
1628 return pi->pcfg.irq_ns & (1 << i);
1629}
1630
1626/* Upon success, returns IdentityToken for the 1631/* Upon success, returns IdentityToken for the
1627 * allocated channel, NULL otherwise. 1632 * allocated channel, NULL otherwise.
1628 */ 1633 */
@@ -1647,7 +1652,8 @@ void *pl330_request_channel(const struct pl330_info *pi)
1647 1652
1648 for (i = 0; i < chans; i++) { 1653 for (i = 0; i < chans; i++) {
1649 thrd = &pl330->channels[i]; 1654 thrd = &pl330->channels[i];
1650 if (thrd->free) { 1655 if ((thrd->free) && (!_manager_ns(thrd) ||
1656 _chan_ns(pi, i))) {
1651 thrd->ev = _alloc_event(thrd); 1657 thrd->ev = _alloc_event(thrd);
1652 if (thrd->ev >= 0) { 1658 if (thrd->ev >= 0) {
1653 thrd->free = false; 1659 thrd->free = false;
diff --git a/arch/arm/configs/at91cap9adk_defconfig b/arch/arm/configs/at91cap9_defconfig
index ffb1edd93363..8826eb218e73 100644
--- a/arch/arm/configs/at91cap9adk_defconfig
+++ b/arch/arm/configs/at91cap9_defconfig
@@ -38,7 +38,6 @@ CONFIG_IP_PNP_RARP=y
38# CONFIG_IPV6 is not set 38# CONFIG_IPV6 is not set
39CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 39CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
40CONFIG_MTD=y 40CONFIG_MTD=y
41CONFIG_MTD_PARTITIONS=y
42CONFIG_MTD_CMDLINE_PARTS=y 41CONFIG_MTD_CMDLINE_PARTS=y
43CONFIG_MTD_CHAR=y 42CONFIG_MTD_CHAR=y
44CONFIG_MTD_BLOCK=y 43CONFIG_MTD_BLOCK=y
@@ -52,16 +51,12 @@ CONFIG_MTD_NAND_ATMEL=y
52CONFIG_BLK_DEV_LOOP=y 51CONFIG_BLK_DEV_LOOP=y
53CONFIG_BLK_DEV_RAM=y 52CONFIG_BLK_DEV_RAM=y
54CONFIG_BLK_DEV_RAM_SIZE=8192 53CONFIG_BLK_DEV_RAM_SIZE=8192
55CONFIG_ATMEL_SSC=y
56CONFIG_SCSI=y 54CONFIG_SCSI=y
57CONFIG_BLK_DEV_SD=y 55CONFIG_BLK_DEV_SD=y
58CONFIG_SCSI_MULTI_LUN=y 56CONFIG_SCSI_MULTI_LUN=y
59CONFIG_NETDEVICES=y 57CONFIG_NETDEVICES=y
60CONFIG_NET_ETHERNET=y
61CONFIG_MII=y 58CONFIG_MII=y
62CONFIG_MACB=y 59CONFIG_MACB=y
63# CONFIG_NETDEV_1000 is not set
64# CONFIG_NETDEV_10000 is not set
65# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 60# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
66CONFIG_INPUT_EVDEV=y 61CONFIG_INPUT_EVDEV=y
67# CONFIG_INPUT_KEYBOARD is not set 62# CONFIG_INPUT_KEYBOARD is not set
@@ -81,7 +76,6 @@ CONFIG_WATCHDOG=y
81CONFIG_WATCHDOG_NOWAYOUT=y 76CONFIG_WATCHDOG_NOWAYOUT=y
82CONFIG_FB=y 77CONFIG_FB=y
83CONFIG_FB_ATMEL=y 78CONFIG_FB_ATMEL=y
84# CONFIG_VGA_CONSOLE is not set
85CONFIG_LOGO=y 79CONFIG_LOGO=y
86# CONFIG_LOGO_LINUX_MONO is not set 80# CONFIG_LOGO_LINUX_MONO is not set
87# CONFIG_LOGO_LINUX_CLUT224 is not set 81# CONFIG_LOGO_LINUX_CLUT224 is not set
@@ -99,7 +93,6 @@ CONFIG_MMC_AT91=m
99CONFIG_RTC_CLASS=y 93CONFIG_RTC_CLASS=y
100CONFIG_RTC_DRV_AT91SAM9=y 94CONFIG_RTC_DRV_AT91SAM9=y
101CONFIG_EXT2_FS=y 95CONFIG_EXT2_FS=y
102CONFIG_INOTIFY=y
103CONFIG_VFAT_FS=y 96CONFIG_VFAT_FS=y
104CONFIG_TMPFS=y 97CONFIG_TMPFS=y
105CONFIG_JFFS2_FS=y 98CONFIG_JFFS2_FS=y
diff --git a/arch/arm/configs/at91rm9200_defconfig b/arch/arm/configs/at91rm9200_defconfig
index 38cb7c985426..bbe4e1a1f5d8 100644
--- a/arch/arm/configs/at91rm9200_defconfig
+++ b/arch/arm/configs/at91rm9200_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
5CONFIG_IKCONFIG=y 5CONFIG_IKCONFIG=y
6CONFIG_IKCONFIG_PROC=y 6CONFIG_IKCONFIG_PROC=y
7CONFIG_LOG_BUF_SHIFT=14 7CONFIG_LOG_BUF_SHIFT=14
8CONFIG_SYSFS_DEPRECATED_V2=y
9CONFIG_BLK_DEV_INITRD=y 8CONFIG_BLK_DEV_INITRD=y
10CONFIG_MODULES=y 9CONFIG_MODULES=y
11CONFIG_MODULE_FORCE_LOAD=y 10CONFIG_MODULE_FORCE_LOAD=y
@@ -56,7 +55,6 @@ CONFIG_IP_PNP=y
56CONFIG_IP_PNP_DHCP=y 55CONFIG_IP_PNP_DHCP=y
57CONFIG_IP_PNP_BOOTP=y 56CONFIG_IP_PNP_BOOTP=y
58CONFIG_NET_IPIP=m 57CONFIG_NET_IPIP=m
59CONFIG_NET_IPGRE=m
60CONFIG_INET_AH=m 58CONFIG_INET_AH=m
61CONFIG_INET_ESP=m 59CONFIG_INET_ESP=m
62CONFIG_INET_IPCOMP=m 60CONFIG_INET_IPCOMP=m
@@ -75,18 +73,8 @@ CONFIG_IPV6_TUNNEL=m
75CONFIG_BRIDGE=m 73CONFIG_BRIDGE=m
76CONFIG_VLAN_8021Q=m 74CONFIG_VLAN_8021Q=m
77CONFIG_BT=m 75CONFIG_BT=m
78CONFIG_BT_L2CAP=m
79CONFIG_BT_SCO=m
80CONFIG_BT_RFCOMM=m
81CONFIG_BT_RFCOMM_TTY=y
82CONFIG_BT_BNEP=m
83CONFIG_BT_BNEP_MC_FILTER=y
84CONFIG_BT_BNEP_PROTO_FILTER=y
85CONFIG_BT_HIDP=m
86CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 76CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
87CONFIG_MTD=y 77CONFIG_MTD=y
88CONFIG_MTD_CONCAT=y
89CONFIG_MTD_PARTITIONS=y
90CONFIG_MTD_CMDLINE_PARTS=y 78CONFIG_MTD_CMDLINE_PARTS=y
91CONFIG_MTD_AFS_PARTS=y 79CONFIG_MTD_AFS_PARTS=y
92CONFIG_MTD_CHAR=y 80CONFIG_MTD_CHAR=y
@@ -108,8 +96,6 @@ CONFIG_BLK_DEV_LOOP=y
108CONFIG_BLK_DEV_NBD=y 96CONFIG_BLK_DEV_NBD=y
109CONFIG_BLK_DEV_RAM=y 97CONFIG_BLK_DEV_RAM=y
110CONFIG_BLK_DEV_RAM_SIZE=8192 98CONFIG_BLK_DEV_RAM_SIZE=8192
111CONFIG_ATMEL_TCLIB=y
112CONFIG_EEPROM_LEGACY=m
113CONFIG_SCSI=y 99CONFIG_SCSI=y
114CONFIG_BLK_DEV_SD=y 100CONFIG_BLK_DEV_SD=y
115CONFIG_BLK_DEV_SR=m 101CONFIG_BLK_DEV_SR=m
@@ -119,14 +105,23 @@ CONFIG_SCSI_MULTI_LUN=y
119# CONFIG_SCSI_LOWLEVEL is not set 105# CONFIG_SCSI_LOWLEVEL is not set
120CONFIG_NETDEVICES=y 106CONFIG_NETDEVICES=y
121CONFIG_TUN=m 107CONFIG_TUN=m
108CONFIG_ARM_AT91_ETHER=y
122CONFIG_PHYLIB=y 109CONFIG_PHYLIB=y
123CONFIG_DAVICOM_PHY=y 110CONFIG_DAVICOM_PHY=y
124CONFIG_SMSC_PHY=y 111CONFIG_SMSC_PHY=y
125CONFIG_MICREL_PHY=y 112CONFIG_MICREL_PHY=y
126CONFIG_NET_ETHERNET=y 113CONFIG_PPP=y
127CONFIG_ARM_AT91_ETHER=y 114CONFIG_PPP_BSDCOMP=y
128# CONFIG_NETDEV_1000 is not set 115CONFIG_PPP_DEFLATE=y
129# CONFIG_NETDEV_10000 is not set 116CONFIG_PPP_FILTER=y
117CONFIG_PPP_MPPE=m
118CONFIG_PPP_MULTILINK=y
119CONFIG_PPPOE=m
120CONFIG_PPP_ASYNC=y
121CONFIG_SLIP=m
122CONFIG_SLIP_COMPRESSED=y
123CONFIG_SLIP_SMART=y
124CONFIG_SLIP_MODE_SLIP6=y
130CONFIG_USB_CATC=m 125CONFIG_USB_CATC=m
131CONFIG_USB_KAWETH=m 126CONFIG_USB_KAWETH=m
132CONFIG_USB_PEGASUS=m 127CONFIG_USB_PEGASUS=m
@@ -139,18 +134,6 @@ CONFIG_USB_NET_RNDIS_HOST=m
139CONFIG_USB_ALI_M5632=y 134CONFIG_USB_ALI_M5632=y
140CONFIG_USB_AN2720=y 135CONFIG_USB_AN2720=y
141CONFIG_USB_EPSON2888=y 136CONFIG_USB_EPSON2888=y
142CONFIG_PPP=y
143CONFIG_PPP_MULTILINK=y
144CONFIG_PPP_FILTER=y
145CONFIG_PPP_ASYNC=y
146CONFIG_PPP_DEFLATE=y
147CONFIG_PPP_BSDCOMP=y
148CONFIG_PPP_MPPE=m
149CONFIG_PPPOE=m
150CONFIG_SLIP=m
151CONFIG_SLIP_COMPRESSED=y
152CONFIG_SLIP_SMART=y
153CONFIG_SLIP_MODE_SLIP6=y
154# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 137# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
155CONFIG_INPUT_MOUSEDEV_SCREEN_X=640 138CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
156CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480 139CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
@@ -158,9 +141,9 @@ CONFIG_INPUT_EVDEV=y
158CONFIG_KEYBOARD_GPIO=y 141CONFIG_KEYBOARD_GPIO=y
159# CONFIG_INPUT_MOUSE is not set 142# CONFIG_INPUT_MOUSE is not set
160CONFIG_INPUT_TOUCHSCREEN=y 143CONFIG_INPUT_TOUCHSCREEN=y
144CONFIG_LEGACY_PTY_COUNT=32
161CONFIG_SERIAL_ATMEL=y 145CONFIG_SERIAL_ATMEL=y
162CONFIG_SERIAL_ATMEL_CONSOLE=y 146CONFIG_SERIAL_ATMEL_CONSOLE=y
163CONFIG_LEGACY_PTY_COUNT=32
164CONFIG_HW_RANDOM=y 147CONFIG_HW_RANDOM=y
165CONFIG_I2C=y 148CONFIG_I2C=y
166CONFIG_I2C_CHARDEV=y 149CONFIG_I2C_CHARDEV=y
@@ -290,7 +273,6 @@ CONFIG_NFS_V3_ACL=y
290CONFIG_NFS_V4=y 273CONFIG_NFS_V4=y
291CONFIG_ROOT_NFS=y 274CONFIG_ROOT_NFS=y
292CONFIG_NFSD=y 275CONFIG_NFSD=y
293CONFIG_SMB_FS=m
294CONFIG_CIFS=m 276CONFIG_CIFS=m
295CONFIG_PARTITION_ADVANCED=y 277CONFIG_PARTITION_ADVANCED=y
296CONFIG_MAC_PARTITION=y 278CONFIG_MAC_PARTITION=y
@@ -335,7 +317,6 @@ CONFIG_NLS_UTF8=y
335CONFIG_MAGIC_SYSRQ=y 317CONFIG_MAGIC_SYSRQ=y
336CONFIG_DEBUG_FS=y 318CONFIG_DEBUG_FS=y
337CONFIG_DEBUG_KERNEL=y 319CONFIG_DEBUG_KERNEL=y
338# CONFIG_RCU_CPU_STALL_DETECTOR is not set
339# CONFIG_FTRACE is not set 320# CONFIG_FTRACE is not set
340CONFIG_CRYPTO_PCBC=y 321CONFIG_CRYPTO_PCBC=y
341CONFIG_CRYPTO_SHA1=y 322CONFIG_CRYPTO_SHA1=y
diff --git a/arch/arm/configs/at91sam9260ek_defconfig b/arch/arm/configs/at91sam9260_defconfig
index f8a9226413bf..505b3765f87e 100644
--- a/arch/arm/configs/at91sam9260ek_defconfig
+++ b/arch/arm/configs/at91sam9260_defconfig
@@ -12,11 +12,23 @@ CONFIG_MODULE_UNLOAD=y
12# CONFIG_IOSCHED_CFQ is not set 12# CONFIG_IOSCHED_CFQ is not set
13CONFIG_ARCH_AT91=y 13CONFIG_ARCH_AT91=y
14CONFIG_ARCH_AT91SAM9260=y 14CONFIG_ARCH_AT91SAM9260=y
15CONFIG_ARCH_AT91SAM9260_SAM9XE=y
15CONFIG_MACH_AT91SAM9260EK=y 16CONFIG_MACH_AT91SAM9260EK=y
17CONFIG_MACH_CAM60=y
18CONFIG_MACH_SAM9_L9260=y
19CONFIG_MACH_AFEB9260=y
20CONFIG_MACH_USB_A9260=y
21CONFIG_MACH_QIL_A9260=y
22CONFIG_MACH_CPU9260=y
23CONFIG_MACH_FLEXIBITY=y
24CONFIG_MACH_SNAPPER_9260=y
25CONFIG_MACH_AT91SAM_DT=y
16CONFIG_AT91_PROGRAMMABLE_CLOCKS=y 26CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
17# CONFIG_ARM_THUMB is not set 27# CONFIG_ARM_THUMB is not set
18CONFIG_ZBOOT_ROM_TEXT=0x0 28CONFIG_ZBOOT_ROM_TEXT=0x0
19CONFIG_ZBOOT_ROM_BSS=0x0 29CONFIG_ZBOOT_ROM_BSS=0x0
30CONFIG_ARM_APPENDED_DTB=y
31CONFIG_ARM_ATAG_DTB_COMPAT=y
20CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw" 32CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
21CONFIG_FPE_NWFPE=y 33CONFIG_FPE_NWFPE=y
22CONFIG_NET=y 34CONFIG_NET=y
@@ -33,12 +45,10 @@ CONFIG_IP_PNP_BOOTP=y
33CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 45CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
34CONFIG_BLK_DEV_RAM=y 46CONFIG_BLK_DEV_RAM=y
35CONFIG_BLK_DEV_RAM_SIZE=8192 47CONFIG_BLK_DEV_RAM_SIZE=8192
36CONFIG_ATMEL_SSC=y
37CONFIG_SCSI=y 48CONFIG_SCSI=y
38CONFIG_BLK_DEV_SD=y 49CONFIG_BLK_DEV_SD=y
39CONFIG_SCSI_MULTI_LUN=y 50CONFIG_SCSI_MULTI_LUN=y
40CONFIG_NETDEVICES=y 51CONFIG_NETDEVICES=y
41CONFIG_NET_ETHERNET=y
42CONFIG_MII=y 52CONFIG_MII=y
43CONFIG_MACB=y 53CONFIG_MACB=y
44# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 54# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
@@ -55,7 +65,6 @@ CONFIG_I2C_GPIO=y
55CONFIG_WATCHDOG=y 65CONFIG_WATCHDOG=y
56CONFIG_WATCHDOG_NOWAYOUT=y 66CONFIG_WATCHDOG_NOWAYOUT=y
57CONFIG_AT91SAM9X_WATCHDOG=y 67CONFIG_AT91SAM9X_WATCHDOG=y
58# CONFIG_VGA_CONSOLE is not set
59# CONFIG_USB_HID is not set 68# CONFIG_USB_HID is not set
60CONFIG_USB=y 69CONFIG_USB=y
61CONFIG_USB_DEVICEFS=y 70CONFIG_USB_DEVICEFS=y
@@ -71,7 +80,6 @@ CONFIG_USB_G_SERIAL=m
71CONFIG_RTC_CLASS=y 80CONFIG_RTC_CLASS=y
72CONFIG_RTC_DRV_AT91SAM9=y 81CONFIG_RTC_DRV_AT91SAM9=y
73CONFIG_EXT2_FS=y 82CONFIG_EXT2_FS=y
74CONFIG_INOTIFY=y
75CONFIG_VFAT_FS=y 83CONFIG_VFAT_FS=y
76CONFIG_TMPFS=y 84CONFIG_TMPFS=y
77CONFIG_CRAMFS=y 85CONFIG_CRAMFS=y
diff --git a/arch/arm/configs/at91sam9g20ek_defconfig b/arch/arm/configs/at91sam9g20_defconfig
index 9e90e6d79297..9123568d9a8d 100644
--- a/arch/arm/configs/at91sam9g20ek_defconfig
+++ b/arch/arm/configs/at91sam9g20_defconfig
@@ -14,6 +14,15 @@ CONFIG_ARCH_AT91=y
14CONFIG_ARCH_AT91SAM9G20=y 14CONFIG_ARCH_AT91SAM9G20=y
15CONFIG_MACH_AT91SAM9G20EK=y 15CONFIG_MACH_AT91SAM9G20EK=y
16CONFIG_MACH_AT91SAM9G20EK_2MMC=y 16CONFIG_MACH_AT91SAM9G20EK_2MMC=y
17CONFIG_MACH_CPU9G20=y
18CONFIG_MACH_ACMENETUSFOXG20=y
19CONFIG_MACH_PORTUXG20=y
20CONFIG_MACH_STAMP9G20=y
21CONFIG_MACH_PCONTROL_G20=y
22CONFIG_MACH_GSIA18S=y
23CONFIG_MACH_USB_A9G20=y
24CONFIG_MACH_SNAPPER_9260=y
25CONFIG_MACH_AT91SAM_DT=y
17CONFIG_AT91_PROGRAMMABLE_CLOCKS=y 26CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
18# CONFIG_ARM_THUMB is not set 27# CONFIG_ARM_THUMB is not set
19CONFIG_AEABI=y 28CONFIG_AEABI=y
@@ -21,9 +30,10 @@ CONFIG_LEDS=y
21CONFIG_LEDS_CPU=y 30CONFIG_LEDS_CPU=y
22CONFIG_ZBOOT_ROM_TEXT=0x0 31CONFIG_ZBOOT_ROM_TEXT=0x0
23CONFIG_ZBOOT_ROM_BSS=0x0 32CONFIG_ZBOOT_ROM_BSS=0x0
33CONFIG_ARM_APPENDED_DTB=y
34CONFIG_ARM_ATAG_DTB_COMPAT=y
24CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw" 35CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
25CONFIG_FPE_NWFPE=y 36CONFIG_FPE_NWFPE=y
26CONFIG_PM=y
27CONFIG_NET=y 37CONFIG_NET=y
28CONFIG_PACKET=y 38CONFIG_PACKET=y
29CONFIG_UNIX=y 39CONFIG_UNIX=y
@@ -37,8 +47,6 @@ CONFIG_IP_PNP_BOOTP=y
37# CONFIG_IPV6 is not set 47# CONFIG_IPV6 is not set
38CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 48CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
39CONFIG_MTD=y 49CONFIG_MTD=y
40CONFIG_MTD_CONCAT=y
41CONFIG_MTD_PARTITIONS=y
42CONFIG_MTD_CMDLINE_PARTS=y 50CONFIG_MTD_CMDLINE_PARTS=y
43CONFIG_MTD_CHAR=y 51CONFIG_MTD_CHAR=y
44CONFIG_MTD_BLOCK=y 52CONFIG_MTD_BLOCK=y
@@ -48,17 +56,13 @@ CONFIG_MTD_NAND_ATMEL=y
48CONFIG_BLK_DEV_LOOP=y 56CONFIG_BLK_DEV_LOOP=y
49CONFIG_BLK_DEV_RAM=y 57CONFIG_BLK_DEV_RAM=y
50CONFIG_BLK_DEV_RAM_SIZE=8192 58CONFIG_BLK_DEV_RAM_SIZE=8192
51CONFIG_ATMEL_SSC=y
52CONFIG_SCSI=y 59CONFIG_SCSI=y
53CONFIG_BLK_DEV_SD=y 60CONFIG_BLK_DEV_SD=y
54CONFIG_SCSI_MULTI_LUN=y 61CONFIG_SCSI_MULTI_LUN=y
55# CONFIG_SCSI_LOWLEVEL is not set 62# CONFIG_SCSI_LOWLEVEL is not set
56CONFIG_NETDEVICES=y 63CONFIG_NETDEVICES=y
57CONFIG_NET_ETHERNET=y
58CONFIG_MII=y 64CONFIG_MII=y
59CONFIG_MACB=y 65CONFIG_MACB=y
60# CONFIG_NETDEV_1000 is not set
61# CONFIG_NETDEV_10000 is not set
62# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 66# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
63CONFIG_INPUT_MOUSEDEV_SCREEN_X=320 67CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
64CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240 68CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240
@@ -66,15 +70,14 @@ CONFIG_INPUT_EVDEV=y
66# CONFIG_KEYBOARD_ATKBD is not set 70# CONFIG_KEYBOARD_ATKBD is not set
67CONFIG_KEYBOARD_GPIO=y 71CONFIG_KEYBOARD_GPIO=y
68# CONFIG_INPUT_MOUSE is not set 72# CONFIG_INPUT_MOUSE is not set
73CONFIG_LEGACY_PTY_COUNT=16
69CONFIG_SERIAL_ATMEL=y 74CONFIG_SERIAL_ATMEL=y
70CONFIG_SERIAL_ATMEL_CONSOLE=y 75CONFIG_SERIAL_ATMEL_CONSOLE=y
71CONFIG_LEGACY_PTY_COUNT=16
72CONFIG_HW_RANDOM=y 76CONFIG_HW_RANDOM=y
73CONFIG_SPI=y 77CONFIG_SPI=y
74CONFIG_SPI_ATMEL=y 78CONFIG_SPI_ATMEL=y
75CONFIG_SPI_SPIDEV=y 79CONFIG_SPI_SPIDEV=y
76# CONFIG_HWMON is not set 80# CONFIG_HWMON is not set
77# CONFIG_VGA_CONSOLE is not set
78CONFIG_SOUND=y 81CONFIG_SOUND=y
79CONFIG_SND=y 82CONFIG_SND=y
80CONFIG_SND_SEQUENCER=y 83CONFIG_SND_SEQUENCER=y
@@ -82,7 +85,6 @@ CONFIG_SND_MIXER_OSS=y
82CONFIG_SND_PCM_OSS=y 85CONFIG_SND_PCM_OSS=y
83CONFIG_SND_SEQUENCER_OSS=y 86CONFIG_SND_SEQUENCER_OSS=y
84# CONFIG_SND_VERBOSE_PROCFS is not set 87# CONFIG_SND_VERBOSE_PROCFS is not set
85CONFIG_SND_AT73C213=y
86CONFIG_USB=y 88CONFIG_USB=y
87CONFIG_USB_DEVICEFS=y 89CONFIG_USB_DEVICEFS=y
88# CONFIG_USB_DEVICE_CLASS is not set 90# CONFIG_USB_DEVICE_CLASS is not set
@@ -105,7 +107,6 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
105CONFIG_RTC_CLASS=y 107CONFIG_RTC_CLASS=y
106CONFIG_RTC_DRV_AT91SAM9=y 108CONFIG_RTC_DRV_AT91SAM9=y
107CONFIG_EXT2_FS=y 109CONFIG_EXT2_FS=y
108CONFIG_INOTIFY=y
109CONFIG_MSDOS_FS=y 110CONFIG_MSDOS_FS=y
110CONFIG_VFAT_FS=y 111CONFIG_VFAT_FS=y
111CONFIG_TMPFS=y 112CONFIG_TMPFS=y
diff --git a/arch/arm/configs/at91sam9g45_defconfig b/arch/arm/configs/at91sam9g45_defconfig
index c5876d244f4b..606d48f3b8f8 100644
--- a/arch/arm/configs/at91sam9g45_defconfig
+++ b/arch/arm/configs/at91sam9g45_defconfig
@@ -18,6 +18,7 @@ CONFIG_MODULE_UNLOAD=y
18CONFIG_ARCH_AT91=y 18CONFIG_ARCH_AT91=y
19CONFIG_ARCH_AT91SAM9G45=y 19CONFIG_ARCH_AT91SAM9G45=y
20CONFIG_MACH_AT91SAM9M10G45EK=y 20CONFIG_MACH_AT91SAM9M10G45EK=y
21CONFIG_MACH_AT91SAM_DT=y
21CONFIG_AT91_PROGRAMMABLE_CLOCKS=y 22CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
22CONFIG_AT91_SLOW_CLOCK=y 23CONFIG_AT91_SLOW_CLOCK=y
23CONFIG_AEABI=y 24CONFIG_AEABI=y
@@ -73,11 +74,8 @@ CONFIG_SCSI_MULTI_LUN=y
73# CONFIG_SCSI_LOWLEVEL is not set 74# CONFIG_SCSI_LOWLEVEL is not set
74CONFIG_NETDEVICES=y 75CONFIG_NETDEVICES=y
75CONFIG_MII=y 76CONFIG_MII=y
76CONFIG_DAVICOM_PHY=y
77CONFIG_NET_ETHERNET=y
78CONFIG_MACB=y 77CONFIG_MACB=y
79# CONFIG_NETDEV_1000 is not set 78CONFIG_DAVICOM_PHY=y
80# CONFIG_NETDEV_10000 is not set
81CONFIG_LIBERTAS_THINFIRM=m 79CONFIG_LIBERTAS_THINFIRM=m
82CONFIG_LIBERTAS_THINFIRM_USB=m 80CONFIG_LIBERTAS_THINFIRM_USB=m
83CONFIG_AT76C50X_USB=m 81CONFIG_AT76C50X_USB=m
@@ -131,7 +129,6 @@ CONFIG_I2C_GPIO=y
131CONFIG_SPI=y 129CONFIG_SPI=y
132CONFIG_SPI_ATMEL=y 130CONFIG_SPI_ATMEL=y
133# CONFIG_HWMON is not set 131# CONFIG_HWMON is not set
134# CONFIG_MFD_SUPPORT is not set
135CONFIG_FB=y 132CONFIG_FB=y
136CONFIG_FB_ATMEL=y 133CONFIG_FB_ATMEL=y
137CONFIG_FB_UDL=m 134CONFIG_FB_UDL=m
diff --git a/arch/arm/configs/at91sam9rlek_defconfig b/arch/arm/configs/at91sam9rl_defconfig
index 75621e4d03fc..ad562ee64209 100644
--- a/arch/arm/configs/at91sam9rlek_defconfig
+++ b/arch/arm/configs/at91sam9rl_defconfig
@@ -23,8 +23,6 @@ CONFIG_NET=y
23CONFIG_UNIX=y 23CONFIG_UNIX=y
24CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 24CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
25CONFIG_MTD=y 25CONFIG_MTD=y
26CONFIG_MTD_CONCAT=y
27CONFIG_MTD_PARTITIONS=y
28CONFIG_MTD_CMDLINE_PARTS=y 26CONFIG_MTD_CMDLINE_PARTS=y
29CONFIG_MTD_CHAR=y 27CONFIG_MTD_CHAR=y
30CONFIG_MTD_BLOCK=y 28CONFIG_MTD_BLOCK=y
@@ -35,7 +33,6 @@ CONFIG_BLK_DEV_LOOP=y
35CONFIG_BLK_DEV_RAM=y 33CONFIG_BLK_DEV_RAM=y
36CONFIG_BLK_DEV_RAM_COUNT=4 34CONFIG_BLK_DEV_RAM_COUNT=4
37CONFIG_BLK_DEV_RAM_SIZE=24576 35CONFIG_BLK_DEV_RAM_SIZE=24576
38CONFIG_ATMEL_SSC=y
39CONFIG_SCSI=y 36CONFIG_SCSI=y
40CONFIG_BLK_DEV_SD=y 37CONFIG_BLK_DEV_SD=y
41CONFIG_SCSI_MULTI_LUN=y 38CONFIG_SCSI_MULTI_LUN=y
@@ -62,13 +59,11 @@ CONFIG_WATCHDOG_NOWAYOUT=y
62CONFIG_AT91SAM9X_WATCHDOG=y 59CONFIG_AT91SAM9X_WATCHDOG=y
63CONFIG_FB=y 60CONFIG_FB=y
64CONFIG_FB_ATMEL=y 61CONFIG_FB_ATMEL=y
65# CONFIG_VGA_CONSOLE is not set
66CONFIG_MMC=y 62CONFIG_MMC=y
67CONFIG_MMC_AT91=m 63CONFIG_MMC_AT91=m
68CONFIG_RTC_CLASS=y 64CONFIG_RTC_CLASS=y
69CONFIG_RTC_DRV_AT91SAM9=y 65CONFIG_RTC_DRV_AT91SAM9=y
70CONFIG_EXT2_FS=y 66CONFIG_EXT2_FS=y
71CONFIG_INOTIFY=y
72CONFIG_MSDOS_FS=y 67CONFIG_MSDOS_FS=y
73CONFIG_VFAT_FS=y 68CONFIG_VFAT_FS=y
74CONFIG_TMPFS=y 69CONFIG_TMPFS=y
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index 227a477346ed..d95763d5f0d8 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -287,7 +287,7 @@ CONFIG_USB=y
287# CONFIG_USB_DEVICE_CLASS is not set 287# CONFIG_USB_DEVICE_CLASS is not set
288CONFIG_USB_OHCI_HCD=y 288CONFIG_USB_OHCI_HCD=y
289CONFIG_USB_GADGET=y 289CONFIG_USB_GADGET=y
290CONFIG_USB_GADGET_PXA27X=y 290CONFIG_USB_PXA27X=y
291CONFIG_USB_ETH=m 291CONFIG_USB_ETH=m
292# CONFIG_USB_ETH_RNDIS is not set 292# CONFIG_USB_ETH_RNDIS is not set
293CONFIG_MMC=y 293CONFIG_MMC=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 176ec22af034..fd996bb13022 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -263,7 +263,7 @@ CONFIG_USB=y
263# CONFIG_USB_DEVICE_CLASS is not set 263# CONFIG_USB_DEVICE_CLASS is not set
264CONFIG_USB_OHCI_HCD=y 264CONFIG_USB_OHCI_HCD=y
265CONFIG_USB_GADGET=y 265CONFIG_USB_GADGET=y
266CONFIG_USB_GADGET_PXA27X=y 266CONFIG_USB_PXA27X=y
267CONFIG_USB_ETH=m 267CONFIG_USB_ETH=m
268# CONFIG_USB_ETH_RNDIS is not set 268# CONFIG_USB_ETH_RNDIS is not set
269CONFIG_MMC=y 269CONFIG_MMC=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index a88e64d4e9a5..443675d317e6 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -132,7 +132,7 @@ CONFIG_USB_MON=m
132CONFIG_USB_OHCI_HCD=y 132CONFIG_USB_OHCI_HCD=y
133CONFIG_USB_GADGET=y 133CONFIG_USB_GADGET=y
134CONFIG_USB_GADGET_VBUS_DRAW=500 134CONFIG_USB_GADGET_VBUS_DRAW=500
135CONFIG_USB_GADGET_PXA27X=y 135CONFIG_USB_PXA27X=y
136CONFIG_USB_ETH=m 136CONFIG_USB_ETH=m
137# CONFIG_USB_ETH_RNDIS is not set 137# CONFIG_USB_ETH_RNDIS is not set
138CONFIG_USB_GADGETFS=m 138CONFIG_USB_GADGETFS=m
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 7b63462b349d..945a34f2a34d 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -48,13 +48,7 @@ CONFIG_MACH_SX1=y
48CONFIG_MACH_NOKIA770=y 48CONFIG_MACH_NOKIA770=y
49CONFIG_MACH_AMS_DELTA=y 49CONFIG_MACH_AMS_DELTA=y
50CONFIG_MACH_OMAP_GENERIC=y 50CONFIG_MACH_OMAP_GENERIC=y
51CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER=y
52CONFIG_OMAP_ARM_216MHZ=y
53CONFIG_OMAP_ARM_195MHZ=y
54CONFIG_OMAP_ARM_192MHZ=y
55CONFIG_OMAP_ARM_182MHZ=y 51CONFIG_OMAP_ARM_182MHZ=y
56CONFIG_OMAP_ARM_168MHZ=y
57# CONFIG_OMAP_ARM_60MHZ is not set
58# CONFIG_ARM_THUMB is not set 52# CONFIG_ARM_THUMB is not set
59CONFIG_PCCARD=y 53CONFIG_PCCARD=y
60CONFIG_OMAP_CF=y 54CONFIG_OMAP_CF=y
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index 4a5a12681be2..374000ec4e4e 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -14,8 +14,6 @@ CONFIG_MODULE_UNLOAD=y
14CONFIG_ARCH_U300=y 14CONFIG_ARCH_U300=y
15CONFIG_MACH_U300=y 15CONFIG_MACH_U300=y
16CONFIG_MACH_U300_BS335=y 16CONFIG_MACH_U300_BS335=y
17CONFIG_MACH_U300_DUAL_RAM=y
18CONFIG_U300_DEBUG=y
19CONFIG_MACH_U300_SPIDUMMY=y 17CONFIG_MACH_U300_SPIDUMMY=y
20CONFIG_NO_HZ=y 18CONFIG_NO_HZ=y
21CONFIG_HIGH_RES_TIMERS=y 19CONFIG_HIGH_RES_TIMERS=y
@@ -26,19 +24,21 @@ CONFIG_ZBOOT_ROM_BSS=0x0
26CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072" 24CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072"
27CONFIG_CPU_IDLE=y 25CONFIG_CPU_IDLE=y
28CONFIG_FPE_NWFPE=y 26CONFIG_FPE_NWFPE=y
29CONFIG_PM=y
30# CONFIG_SUSPEND is not set 27# CONFIG_SUSPEND is not set
31CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 28CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
32# CONFIG_PREVENT_FIRMWARE_BUILD is not set 29# CONFIG_PREVENT_FIRMWARE_BUILD is not set
33# CONFIG_MISC_DEVICES is not set 30CONFIG_MTD=y
31CONFIG_MTD_CMDLINE_PARTS=y
32CONFIG_MTD_NAND=y
33CONFIG_MTD_NAND_FSMC=y
34# CONFIG_INPUT_MOUSEDEV is not set 34# CONFIG_INPUT_MOUSEDEV is not set
35CONFIG_INPUT_EVDEV=y 35CONFIG_INPUT_EVDEV=y
36# CONFIG_KEYBOARD_ATKBD is not set 36# CONFIG_KEYBOARD_ATKBD is not set
37# CONFIG_INPUT_MOUSE is not set 37# CONFIG_INPUT_MOUSE is not set
38# CONFIG_SERIO is not set 38# CONFIG_SERIO is not set
39CONFIG_LEGACY_PTY_COUNT=16
39CONFIG_SERIAL_AMBA_PL011=y 40CONFIG_SERIAL_AMBA_PL011=y
40CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 41CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
41CONFIG_LEGACY_PTY_COUNT=16
42# CONFIG_HW_RANDOM is not set 42# CONFIG_HW_RANDOM is not set
43CONFIG_I2C=y 43CONFIG_I2C=y
44# CONFIG_HWMON is not set 44# CONFIG_HWMON is not set
@@ -51,6 +51,7 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
51# CONFIG_HID_SUPPORT is not set 51# CONFIG_HID_SUPPORT is not set
52# CONFIG_USB_SUPPORT is not set 52# CONFIG_USB_SUPPORT is not set
53CONFIG_MMC=y 53CONFIG_MMC=y
54CONFIG_MMC_CLKGATE=y
54CONFIG_MMC_ARMMMCI=y 55CONFIG_MMC_ARMMMCI=y
55CONFIG_RTC_CLASS=y 56CONFIG_RTC_CLASS=y
56# CONFIG_RTC_HCTOSYS is not set 57# CONFIG_RTC_HCTOSYS is not set
@@ -65,10 +66,8 @@ CONFIG_NLS_CODEPAGE_437=y
65CONFIG_NLS_ISO8859_1=y 66CONFIG_NLS_ISO8859_1=y
66CONFIG_PRINTK_TIME=y 67CONFIG_PRINTK_TIME=y
67CONFIG_DEBUG_FS=y 68CONFIG_DEBUG_FS=y
68CONFIG_DEBUG_KERNEL=y
69# CONFIG_SCHED_DEBUG is not set 69# CONFIG_SCHED_DEBUG is not set
70CONFIG_TIMER_STATS=y 70CONFIG_TIMER_STATS=y
71# CONFIG_DEBUG_PREEMPT is not set 71# CONFIG_DEBUG_PREEMPT is not set
72CONFIG_DEBUG_INFO=y 72CONFIG_DEBUG_INFO=y
73# CONFIG_RCU_CPU_STALL_DETECTOR is not set
74# CONFIG_CRC32 is not set 73# CONFIG_CRC32 is not set
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 97d31a4663da..2d7b6e7b7271 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -10,7 +10,7 @@ CONFIG_MODULE_UNLOAD=y
10CONFIG_ARCH_U8500=y 10CONFIG_ARCH_U8500=y
11CONFIG_UX500_SOC_DB5500=y 11CONFIG_UX500_SOC_DB5500=y
12CONFIG_UX500_SOC_DB8500=y 12CONFIG_UX500_SOC_DB8500=y
13CONFIG_MACH_U8500=y 13CONFIG_MACH_HREFV60=y
14CONFIG_MACH_SNOWBALL=y 14CONFIG_MACH_SNOWBALL=y
15CONFIG_MACH_U5500=y 15CONFIG_MACH_U5500=y
16CONFIG_NO_HZ=y 16CONFIG_NO_HZ=y
@@ -24,6 +24,7 @@ CONFIG_CPU_FREQ=y
24CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 24CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
25CONFIG_VFP=y 25CONFIG_VFP=y
26CONFIG_NEON=y 26CONFIG_NEON=y
27CONFIG_PM_RUNTIME=y
27CONFIG_NET=y 28CONFIG_NET=y
28CONFIG_PACKET=y 29CONFIG_PACKET=y
29CONFIG_UNIX=y 30CONFIG_UNIX=y
@@ -41,11 +42,8 @@ CONFIG_MISC_DEVICES=y
41CONFIG_AB8500_PWM=y 42CONFIG_AB8500_PWM=y
42CONFIG_SENSORS_BH1780=y 43CONFIG_SENSORS_BH1780=y
43CONFIG_NETDEVICES=y 44CONFIG_NETDEVICES=y
44CONFIG_SMSC_PHY=y
45CONFIG_NET_ETHERNET=y
46CONFIG_SMSC911X=y 45CONFIG_SMSC911X=y
47# CONFIG_NETDEV_1000 is not set 46CONFIG_SMSC_PHY=y
48# CONFIG_NETDEV_10000 is not set
49# CONFIG_WLAN is not set 47# CONFIG_WLAN is not set
50# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 48# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
51CONFIG_INPUT_EVDEV=y 49CONFIG_INPUT_EVDEV=y
@@ -72,15 +70,12 @@ CONFIG_SPI=y
72CONFIG_SPI_PL022=y 70CONFIG_SPI_PL022=y
73CONFIG_GPIO_STMPE=y 71CONFIG_GPIO_STMPE=y
74CONFIG_GPIO_TC3589X=y 72CONFIG_GPIO_TC3589X=y
75# CONFIG_HWMON is not set
76CONFIG_MFD_STMPE=y 73CONFIG_MFD_STMPE=y
77CONFIG_MFD_TC3589X=y 74CONFIG_MFD_TC3589X=y
75CONFIG_AB5500_CORE=y
78CONFIG_AB8500_CORE=y 76CONFIG_AB8500_CORE=y
79CONFIG_REGULATOR_AB8500=y 77CONFIG_REGULATOR_AB8500=y
80# CONFIG_HID_SUPPORT is not set 78# CONFIG_HID_SUPPORT is not set
81CONFIG_USB_MUSB_HDRC=y
82CONFIG_USB_GADGET_MUSB_HDRC=y
83CONFIG_MUSB_PIO_ONLY=y
84CONFIG_USB_GADGET=y 79CONFIG_USB_GADGET=y
85CONFIG_AB8500_USB=y 80CONFIG_AB8500_USB=y
86CONFIG_MMC=y 81CONFIG_MMC=y
@@ -97,6 +92,7 @@ CONFIG_DMADEVICES=y
97CONFIG_STE_DMA40=y 92CONFIG_STE_DMA40=y
98CONFIG_STAGING=y 93CONFIG_STAGING=y
99CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y 94CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
95CONFIG_HSEM_U8500=y
100CONFIG_EXT2_FS=y 96CONFIG_EXT2_FS=y
101CONFIG_EXT2_FS_XATTR=y 97CONFIG_EXT2_FS_XATTR=y
102CONFIG_EXT2_FS_POSIX_ACL=y 98CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index 59577ad3f4ef..547a3c1e59db 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -140,7 +140,7 @@ CONFIG_USB_SERIAL=m
140CONFIG_USB_SERIAL_GENERIC=y 140CONFIG_USB_SERIAL_GENERIC=y
141CONFIG_USB_SERIAL_MCT_U232=m 141CONFIG_USB_SERIAL_MCT_U232=m
142CONFIG_USB_GADGET=m 142CONFIG_USB_GADGET=m
143CONFIG_USB_GADGET_PXA27X=y 143CONFIG_USB_PXA27X=y
144CONFIG_USB_ETH=m 144CONFIG_USB_ETH=m
145CONFIG_USB_GADGETFS=m 145CONFIG_USB_GADGETFS=m
146CONFIG_USB_FILE_STORAGE=m 146CONFIG_USB_FILE_STORAGE=m
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 1db1143a9483..7df239bcdf27 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -20,6 +20,8 @@
20#ifndef __ASM_ARM_HARDWARE_L2X0_H 20#ifndef __ASM_ARM_HARDWARE_L2X0_H
21#define __ASM_ARM_HARDWARE_L2X0_H 21#define __ASM_ARM_HARDWARE_L2X0_H
22 22
23#include <linux/errno.h>
24
23#define L2X0_CACHE_ID 0x000 25#define L2X0_CACHE_ID 0x000
24#define L2X0_CACHE_TYPE 0x004 26#define L2X0_CACHE_TYPE 0x004
25#define L2X0_CTRL 0x100 27#define L2X0_CTRL 0x100
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 7d19425dd496..2b0efc3104ac 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -13,6 +13,7 @@
13struct tag; 13struct tag;
14struct meminfo; 14struct meminfo;
15struct sys_timer; 15struct sys_timer;
16struct pt_regs;
16 17
17struct machine_desc { 18struct machine_desc {
18 unsigned int nr; /* architecture number */ 19 unsigned int nr; /* architecture number */
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 71d99b83cdb9..0bda22c094a6 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -55,16 +55,6 @@ reserve_pmu(enum arm_pmu_type type);
55extern void 55extern void
56release_pmu(enum arm_pmu_type type); 56release_pmu(enum arm_pmu_type type);
57 57
58/**
59 * init_pmu() - Initialise the PMU.
60 *
61 * Initialise the system ready for PMU enabling. This should typically set the
62 * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
63 * the actual hardware initialisation.
64 */
65extern int
66init_pmu(enum arm_pmu_type type);
67
68#else /* CONFIG_CPU_HAS_PMU */ 58#else /* CONFIG_CPU_HAS_PMU */
69 59
70#include <linux/err.h> 60#include <linux/err.h>
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index a7e457ed27c3..58b8b84adcd2 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -25,7 +25,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
25 25
26void init_cpu_topology(void); 26void init_cpu_topology(void);
27void store_cpu_topology(unsigned int cpuid); 27void store_cpu_topology(unsigned int cpuid);
28const struct cpumask *cpu_coregroup_mask(unsigned int cpu); 28const struct cpumask *cpu_coregroup_mask(int cpu);
29 29
30#else 30#else
31 31
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index c60a2944f95b..4a1123783806 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -402,6 +402,8 @@
402#define __NR_syncfs (__NR_SYSCALL_BASE+373) 402#define __NR_syncfs (__NR_SYSCALL_BASE+373)
403#define __NR_sendmmsg (__NR_SYSCALL_BASE+374) 403#define __NR_sendmmsg (__NR_SYSCALL_BASE+374)
404#define __NR_setns (__NR_SYSCALL_BASE+375) 404#define __NR_setns (__NR_SYSCALL_BASE+375)
405#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
406#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
405 407
406/* 408/*
407 * The following SWIs are ARM private. 409 * The following SWIs are ARM private.
diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h
index a5edf421005c..d1c3f3a71c94 100644
--- a/arch/arm/include/asm/unwind.h
+++ b/arch/arm/include/asm/unwind.h
@@ -30,14 +30,15 @@ enum unwind_reason_code {
30}; 30};
31 31
32struct unwind_idx { 32struct unwind_idx {
33 unsigned long addr; 33 unsigned long addr_offset;
34 unsigned long insn; 34 unsigned long insn;
35}; 35};
36 36
37struct unwind_table { 37struct unwind_table {
38 struct list_head list; 38 struct list_head list;
39 struct unwind_idx *start; 39 const struct unwind_idx *start;
40 struct unwind_idx *stop; 40 const struct unwind_idx *origin;
41 const struct unwind_idx *stop;
41 unsigned long begin_addr; 42 unsigned long begin_addr;
42 unsigned long end_addr; 43 unsigned long end_addr;
43}; 44};
@@ -49,15 +50,6 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
49extern void unwind_table_del(struct unwind_table *tab); 50extern void unwind_table_del(struct unwind_table *tab);
50extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk); 51extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
51 52
52#ifdef CONFIG_ARM_UNWIND
53extern int __init unwind_init(void);
54#else
55static inline int __init unwind_init(void)
56{
57 return 0;
58}
59#endif
60
61#endif /* !__ASSEMBLY__ */ 53#endif /* !__ASSEMBLY__ */
62 54
63#ifdef CONFIG_ARM_UNWIND 55#ifdef CONFIG_ARM_UNWIND
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 9943e9e74a1b..463ff4a0ec8a 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -385,6 +385,8 @@
385 CALL(sys_syncfs) 385 CALL(sys_syncfs)
386 CALL(sys_sendmmsg) 386 CALL(sys_sendmmsg)
387/* 375 */ CALL(sys_setns) 387/* 375 */ CALL(sys_setns)
388 CALL(sys_process_vm_readv)
389 CALL(sys_process_vm_writev)
388#ifndef syscalls_counted 390#ifndef syscalls_counted
389.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 391.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
390#define syscalls_counted 392#define syscalls_counted
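
The unistd.h and calls.S hunks above wire up the process_vm_readv()/process_vm_writev() system calls (numbers 376/377 on ARM). As a rough illustration of what userspace gains, here is a minimal sketch of reading another task's memory through the glibc wrapper; it assumes a wrapper is available (glibc 2.15 or later) and the pid and remote address are placeholders supplied by the caller -- it is not part of the patch.

/* Illustrative userspace sketch, not from the patch: copy 'len' bytes
 * from 'remote_addr' in process 'pid' into 'local_buf' via the newly
 * wired-up process_vm_readv() syscall.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

ssize_t read_remote(pid_t pid, void *remote_addr, void *local_buf, size_t len)
{
	struct iovec local  = { .iov_base = local_buf,   .iov_len = len };
	struct iovec remote = { .iov_base = remote_addr, .iov_len = len };

	/* one local iovec, one remote iovec, no flags */
	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
	if (n < 0)
		perror("process_vm_readv");
	return n;
}
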
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 9ad50c4208ae..b145f16c91bc 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -497,7 +497,7 @@ ENDPROC(__und_usr)
497 .popsection 497 .popsection
498 .pushsection __ex_table,"a" 498 .pushsection __ex_table,"a"
499 .long 1b, 4b 499 .long 1b, 4b
500#if __LINUX_ARM_ARCH__ >= 7 500#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
501 .long 2b, 4b 501 .long 2b, 4b
502 .long 3b, 4b 502 .long 3b, 4b
503#endif 503#endif
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 566c54c2a1fe..08c82fd844a8 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -360,7 +360,7 @@ __secondary_data:
360 * r13 = *virtual* address to jump to upon completion 360 * r13 = *virtual* address to jump to upon completion
361 */ 361 */
362__enable_mmu: 362__enable_mmu:
363#ifdef CONFIG_ALIGNMENT_TRAP 363#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
364 orr r0, r0, #CR_A 364 orr r0, r0, #CR_A
365#else 365#else
366 bic r0, r0, #CR_A 366 bic r0, r0, #CR_A
diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c
index 9fe8910308af..8a30c89da70e 100644
--- a/arch/arm/kernel/kprobes-arm.c
+++ b/arch/arm/kernel/kprobes-arm.c
@@ -519,10 +519,12 @@ static const union decode_item arm_cccc_0000_____1001_table[] = {
519static const union decode_item arm_cccc_0001_____1001_table[] = { 519static const union decode_item arm_cccc_0001_____1001_table[] = {
520 /* Synchronization primitives */ 520 /* Synchronization primitives */
521 521
522#if __LINUX_ARM_ARCH__ < 6
523 /* Deprecated on ARMv6 and may be UNDEFINED on v7 */
522 /* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */ 524 /* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
523 DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc, 525 DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc,
524 REGS(NOPC, NOPC, 0, 0, NOPC)), 526 REGS(NOPC, NOPC, 0, 0, NOPC)),
525 527#endif
526 /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */ 528 /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
527 /* And unallocated instructions... */ 529 /* And unallocated instructions... */
528 DECODE_END 530 DECODE_END
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index fc82de8bdcce..ba32b393b3f0 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -427,18 +427,25 @@ void kprobe_arm_test_cases(void)
427 427
428 TEST_GROUP("Synchronization primitives") 428 TEST_GROUP("Synchronization primitives")
429 429
430 /* 430#if __LINUX_ARM_ARCH__ < 6
431 * Use hard coded constants for SWP instructions to avoid warnings 431 TEST_RP("swp lr, r",7,VAL2,", [r",8,0,"]")
432 * about deprecated instructions. 432 TEST_R( "swpvs r0, r",1,VAL1,", [sp]")
433 */ 433 TEST_RP("swp sp, r",14,VAL2,", [r",12,13*4,"]")
434 TEST_RP( ".word 0xe108e097 @ swp lr, r",7,VAL2,", [r",8,0,"]") 434#else
435 TEST_R( ".word 0x610d0091 @ swpvs r0, r",1,VAL1,", [sp]") 435 TEST_UNSUPPORTED(".word 0xe108e097 @ swp lr, r7, [r8]")
436 TEST_RP( ".word 0xe10cd09e @ swp sp, r",14,VAL2,", [r",12,13*4,"]") 436 TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs r0, r1, [sp]")
437 TEST_UNSUPPORTED(".word 0xe10cd09e @ swp sp, r14 [r12]")
438#endif
437 TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]") 439 TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]")
438 TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]") 440 TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]")
439 TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]") 441 TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]")
440 TEST_RP( ".word 0xe148e097 @ swpb lr, r",7,VAL2,", [r",8,0,"]") 442#if __LINUX_ARM_ARCH__ < 6
441 TEST_R( ".word 0x614d0091 @ swpvsb r0, r",1,VAL1,", [sp]") 443 TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]")
444 TEST_R( "swpvsb r0, r",1,VAL1,", [sp]")
445#else
446 TEST_UNSUPPORTED(".word 0xe148e097 @ swpb lr, r7, [r8]")
447 TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb r0, r1, [sp]")
448#endif
442 TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]") 449 TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]")
443 450
444 TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */ 451 TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */
@@ -550,7 +557,7 @@ void kprobe_arm_test_cases(void)
550 TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]") 557 TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]")
551 TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!") 558 TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
552 TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!") 559 TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
553 TEST_RPR( "strd r",2, VAL1,", [r",3, 24,"], r",4,48,"") 560 TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"")
554 TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"") 561 TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
555 TEST_UNSUPPORTED(".word 0xe1afc0fa @ strd r12, [pc, r10]!") 562 TEST_UNSUPPORTED(".word 0xe1afc0fa @ strd r12, [pc, r10]!")
556 563
diff --git a/arch/arm/kernel/kprobes-test-thumb.c b/arch/arm/kernel/kprobes-test-thumb.c
index 5e726c31c45a..5d8b85792222 100644
--- a/arch/arm/kernel/kprobes-test-thumb.c
+++ b/arch/arm/kernel/kprobes-test-thumb.c
@@ -222,8 +222,8 @@ void kprobe_thumb16_test_cases(void)
222DONT_TEST_IN_ITBLOCK( 222DONT_TEST_IN_ITBLOCK(
223 TEST_BF_R( "cbnz r",0,0, ", 2f") 223 TEST_BF_R( "cbnz r",0,0, ", 2f")
224 TEST_BF_R( "cbz r",2,-1,", 2f") 224 TEST_BF_R( "cbz r",2,-1,", 2f")
225 TEST_BF_RX( "cbnz r",4,1, ", 2f",0x20) 225 TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20)
226 TEST_BF_RX( "cbz r",7,0, ", 2f",0x40) 226 TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40)
227) 227)
228 TEST_R("sxth r0, r",7, HH1,"") 228 TEST_R("sxth r0, r",7, HH1,"")
229 TEST_R("sxth r7, r",0, HH2,"") 229 TEST_R("sxth r7, r",0, HH2,"")
@@ -246,7 +246,7 @@ DONT_TEST_IN_ITBLOCK(
246 TESTCASE_START(code) \ 246 TESTCASE_START(code) \
247 TEST_ARG_PTR(13, offset) \ 247 TEST_ARG_PTR(13, offset) \
248 TEST_ARG_END("") \ 248 TEST_ARG_END("") \
249 TEST_BRANCH_F(code,0) \ 249 TEST_BRANCH_F(code) \
250 TESTCASE_END 250 TESTCASE_END
251 251
252 TEST("push {r0}") 252 TEST("push {r0}")
@@ -319,8 +319,8 @@ CONDITION_INSTRUCTIONS(8,
319 319
320 TEST_BF( "b 2f") 320 TEST_BF( "b 2f")
321 TEST_BB( "b 2b") 321 TEST_BB( "b 2b")
322 TEST_BF_X("b 2f", 0x400) 322 TEST_BF_X("b 2f", SPACE_0x400)
323 TEST_BB_X("b 2b", 0x400) 323 TEST_BB_X("b 2b", SPACE_0x400)
324 324
325 TEST_GROUP("Testing instructions in IT blocks") 325 TEST_GROUP("Testing instructions in IT blocks")
326 326
@@ -746,7 +746,7 @@ CONDITION_INSTRUCTIONS(22,
746 TEST_BB("bne.w 2b") 746 TEST_BB("bne.w 2b")
747 TEST_BF("bgt.w 2f") 747 TEST_BF("bgt.w 2f")
748 TEST_BB("blt.w 2b") 748 TEST_BB("blt.w 2b")
749 TEST_BF_X("bpl.w 2f",0x1000) 749 TEST_BF_X("bpl.w 2f", SPACE_0x1000)
750) 750)
751 751
752 TEST_UNSUPPORTED("msr cpsr, r0") 752 TEST_UNSUPPORTED("msr cpsr, r0")
@@ -786,11 +786,11 @@ CONDITION_INSTRUCTIONS(22,
786 786
787 TEST_BF( "b.w 2f") 787 TEST_BF( "b.w 2f")
788 TEST_BB( "b.w 2b") 788 TEST_BB( "b.w 2b")
789 TEST_BF_X("b.w 2f", 0x1000) 789 TEST_BF_X("b.w 2f", SPACE_0x1000)
790 790
791 TEST_BF( "bl.w 2f") 791 TEST_BF( "bl.w 2f")
792 TEST_BB( "bl.w 2b") 792 TEST_BB( "bl.w 2b")
793 TEST_BB_X("bl.w 2b", 0x1000) 793 TEST_BB_X("bl.w 2b", SPACE_0x1000)
794 794
795 TEST_X( "blx __dummy_arm_subroutine", 795 TEST_X( "blx __dummy_arm_subroutine",
796 ".arm \n\t" 796 ".arm \n\t"
diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h
index 0dc5d77b9356..e28a869b1ae4 100644
--- a/arch/arm/kernel/kprobes-test.h
+++ b/arch/arm/kernel/kprobes-test.h
@@ -149,23 +149,31 @@ struct test_arg_end {
149 "1: "instruction" \n\t" \ 149 "1: "instruction" \n\t" \
150 " nop \n\t" 150 " nop \n\t"
151 151
152#define TEST_BRANCH_F(instruction, xtra_dist) \ 152#define TEST_BRANCH_F(instruction) \
153 TEST_INSTRUCTION(instruction) \ 153 TEST_INSTRUCTION(instruction) \
154 ".if "#xtra_dist" \n\t" \
155 " b 99f \n\t" \ 154 " b 99f \n\t" \
156 ".space "#xtra_dist" \n\t" \ 155 "2: nop \n\t"
157 ".endif \n\t" \ 156
157#define TEST_BRANCH_B(instruction) \
158 " b 50f \n\t" \
159 " b 99f \n\t" \
160 "2: nop \n\t" \
161 " b 99f \n\t" \
162 TEST_INSTRUCTION(instruction)
163
164#define TEST_BRANCH_FX(instruction, codex) \
165 TEST_INSTRUCTION(instruction) \
166 " b 99f \n\t" \
167 codex" \n\t" \
158 " b 99f \n\t" \ 168 " b 99f \n\t" \
159 "2: nop \n\t" 169 "2: nop \n\t"
160 170
161#define TEST_BRANCH_B(instruction, xtra_dist) \ 171#define TEST_BRANCH_BX(instruction, codex) \
162 " b 50f \n\t" \ 172 " b 50f \n\t" \
163 " b 99f \n\t" \ 173 " b 99f \n\t" \
164 "2: nop \n\t" \ 174 "2: nop \n\t" \
165 " b 99f \n\t" \ 175 " b 99f \n\t" \
166 ".if "#xtra_dist" \n\t" \ 176 codex" \n\t" \
167 ".space "#xtra_dist" \n\t" \
168 ".endif \n\t" \
169 TEST_INSTRUCTION(instruction) 177 TEST_INSTRUCTION(instruction)
170 178
171#define TESTCASE_END \ 179#define TESTCASE_END \
@@ -301,47 +309,60 @@ struct test_arg_end {
301 TESTCASE_START(code1 #reg1 code2) \ 309 TESTCASE_START(code1 #reg1 code2) \
302 TEST_ARG_PTR(reg1, val1) \ 310 TEST_ARG_PTR(reg1, val1) \
303 TEST_ARG_END("") \ 311 TEST_ARG_END("") \
304 TEST_BRANCH_F(code1 #reg1 code2, 0) \ 312 TEST_BRANCH_F(code1 #reg1 code2) \
305 TESTCASE_END 313 TESTCASE_END
306 314
307#define TEST_BF_X(code, xtra_dist) \ 315#define TEST_BF(code) \
308 TESTCASE_START(code) \ 316 TESTCASE_START(code) \
309 TEST_ARG_END("") \ 317 TEST_ARG_END("") \
310 TEST_BRANCH_F(code, xtra_dist) \ 318 TEST_BRANCH_F(code) \
311 TESTCASE_END 319 TESTCASE_END
312 320
313#define TEST_BB_X(code, xtra_dist) \ 321#define TEST_BB(code) \
314 TESTCASE_START(code) \ 322 TESTCASE_START(code) \
315 TEST_ARG_END("") \ 323 TEST_ARG_END("") \
316 TEST_BRANCH_B(code, xtra_dist) \ 324 TEST_BRANCH_B(code) \
317 TESTCASE_END 325 TESTCASE_END
318 326
319#define TEST_BF_RX(code1, reg, val, code2, xtra_dist) \ 327#define TEST_BF_R(code1, reg, val, code2) \
320 TESTCASE_START(code1 #reg code2) \ 328 TESTCASE_START(code1 #reg code2) \
321 TEST_ARG_REG(reg, val) \ 329 TEST_ARG_REG(reg, val) \
322 TEST_ARG_END("") \ 330 TEST_ARG_END("") \
323 TEST_BRANCH_F(code1 #reg code2, xtra_dist) \ 331 TEST_BRANCH_F(code1 #reg code2) \
324 TESTCASE_END 332 TESTCASE_END
325 333
326#define TEST_BB_RX(code1, reg, val, code2, xtra_dist) \ 334#define TEST_BB_R(code1, reg, val, code2) \
327 TESTCASE_START(code1 #reg code2) \ 335 TESTCASE_START(code1 #reg code2) \
328 TEST_ARG_REG(reg, val) \ 336 TEST_ARG_REG(reg, val) \
329 TEST_ARG_END("") \ 337 TEST_ARG_END("") \
330 TEST_BRANCH_B(code1 #reg code2, xtra_dist) \ 338 TEST_BRANCH_B(code1 #reg code2) \
331 TESTCASE_END 339 TESTCASE_END
332 340
333#define TEST_BF(code) TEST_BF_X(code, 0)
334#define TEST_BB(code) TEST_BB_X(code, 0)
335
336#define TEST_BF_R(code1, reg, val, code2) TEST_BF_RX(code1, reg, val, code2, 0)
337#define TEST_BB_R(code1, reg, val, code2) TEST_BB_RX(code1, reg, val, code2, 0)
338
339#define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \ 341#define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \
340 TESTCASE_START(code1 #reg1 code2 #reg2 code3) \ 342 TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
341 TEST_ARG_REG(reg1, val1) \ 343 TEST_ARG_REG(reg1, val1) \
342 TEST_ARG_REG(reg2, val2) \ 344 TEST_ARG_REG(reg2, val2) \
343 TEST_ARG_END("") \ 345 TEST_ARG_END("") \
344 TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0) \ 346 TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3) \
347 TESTCASE_END
348
349#define TEST_BF_X(code, codex) \
350 TESTCASE_START(code) \
351 TEST_ARG_END("") \
352 TEST_BRANCH_FX(code, codex) \
353 TESTCASE_END
354
355#define TEST_BB_X(code, codex) \
356 TESTCASE_START(code) \
357 TEST_ARG_END("") \
358 TEST_BRANCH_BX(code, codex) \
359 TESTCASE_END
360
361#define TEST_BF_RX(code1, reg, val, code2, codex) \
362 TESTCASE_START(code1 #reg code2) \
363 TEST_ARG_REG(reg, val) \
364 TEST_ARG_END("") \
365 TEST_BRANCH_FX(code1 #reg code2, codex) \
345 TESTCASE_END 366 TESTCASE_END
346 367
347#define TEST_X(code, codex) \ 368#define TEST_X(code, codex) \
@@ -372,6 +393,25 @@ struct test_arg_end {
372 TESTCASE_END 393 TESTCASE_END
373 394
374 395
396/*
397 * Macros for defining space directives spread over multiple lines.
398 * These are required so the compiler guesses better the length of inline asm
399 * code and will spill the literal pool early enough to avoid generating PC
400 * relative loads with out of range offsets.
401 */
402#define TWICE(x) x x
403#define SPACE_0x8 TWICE(".space 4\n\t")
404#define SPACE_0x10 TWICE(SPACE_0x8)
405#define SPACE_0x20 TWICE(SPACE_0x10)
406#define SPACE_0x40 TWICE(SPACE_0x20)
407#define SPACE_0x80 TWICE(SPACE_0x40)
408#define SPACE_0x100 TWICE(SPACE_0x80)
409#define SPACE_0x200 TWICE(SPACE_0x100)
410#define SPACE_0x400 TWICE(SPACE_0x200)
411#define SPACE_0x800 TWICE(SPACE_0x400)
412#define SPACE_0x1000 TWICE(SPACE_0x800)
413
414
375/* Various values used in test cases... */ 415/* Various values used in test cases... */
376#define N(val) (val ^ 0xffffffff) 416#define N(val) (val ^ 0xffffffff)
377#define VAL1 0x12345678 417#define VAL1 0x12345678
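
The SPACE_0xNN helpers introduced above are built by repeated doubling with TWICE(), so each one expands to a run of ".space 4" directives split across many string literals; as the in-file comment explains, that keeps the compiler's estimate of the inline asm length realistic so the literal pool is spilled before PC-relative loads go out of range. Below is a minimal, self-contained sketch of the doubling pattern (plain user-space C, only to show the expansion size; nothing in it is kernel API).

/* Standalone sketch of the TWICE() doubling used for the SPACE_0xNN
 * padding macros: SPACE_0x20 ends up as eight adjacent ".space 4\n\t"
 * string literals, i.e. 0x20 bytes of assembler padding.
 */
#include <assert.h>
#include <string.h>

#define TWICE(x)   x x
#define SPACE_0x8  TWICE(".space 4\n\t")
#define SPACE_0x10 TWICE(SPACE_0x8)
#define SPACE_0x20 TWICE(SPACE_0x10)

int main(void)
{
	/* adjacent literals concatenate into one string of eight copies */
	assert(strlen(SPACE_0x20) == 8 * strlen(".space 4\n\t"));
	return 0;
}
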
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index c1b4463dcc83..e59bbd496c39 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -32,24 +32,6 @@ static atomic_t waiting_for_crash_ipi;
32 32
33int machine_kexec_prepare(struct kimage *image) 33int machine_kexec_prepare(struct kimage *image)
34{ 34{
35 unsigned long page_list;
36 void *reboot_code_buffer;
37 page_list = image->head & PAGE_MASK;
38
39 reboot_code_buffer = page_address(image->control_code_page);
40
41 /* Prepare parameters for reboot_code_buffer*/
42 kexec_start_address = image->start;
43 kexec_indirection_page = page_list;
44 kexec_mach_type = machine_arch_type;
45 kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
46
47 /* copy our kernel relocation code to the control code page */
48 memcpy(reboot_code_buffer,
49 relocate_new_kernel, relocate_new_kernel_size);
50
51 flush_icache_range((unsigned long) reboot_code_buffer,
52 (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
53 return 0; 35 return 0;
54} 36}
55 37
@@ -100,14 +82,31 @@ void (*kexec_reinit)(void);
100 82
101void machine_kexec(struct kimage *image) 83void machine_kexec(struct kimage *image)
102{ 84{
85 unsigned long page_list;
103 unsigned long reboot_code_buffer_phys; 86 unsigned long reboot_code_buffer_phys;
104 void *reboot_code_buffer; 87 void *reboot_code_buffer;
105 88
89
90 page_list = image->head & PAGE_MASK;
91
106 /* we need both effective and real address here */ 92 /* we need both effective and real address here */
107 reboot_code_buffer_phys = 93 reboot_code_buffer_phys =
108 page_to_pfn(image->control_code_page) << PAGE_SHIFT; 94 page_to_pfn(image->control_code_page) << PAGE_SHIFT;
109 reboot_code_buffer = page_address(image->control_code_page); 95 reboot_code_buffer = page_address(image->control_code_page);
110 96
97 /* Prepare parameters for reboot_code_buffer*/
98 kexec_start_address = image->start;
99 kexec_indirection_page = page_list;
100 kexec_mach_type = machine_arch_type;
101 kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
102
103 /* copy our kernel relocation code to the control code page */
104 memcpy(reboot_code_buffer,
105 relocate_new_kernel, relocate_new_kernel_size);
106
107
108 flush_icache_range((unsigned long) reboot_code_buffer,
109 (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
111 printk(KERN_INFO "Bye!\n"); 110 printk(KERN_INFO "Bye!\n");
112 111
113 if (kexec_reinit) 112 if (kexec_reinit)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 24e2347be6b1..88b0941ce51e 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -343,19 +343,25 @@ validate_group(struct perf_event *event)
343{ 343{
344 struct perf_event *sibling, *leader = event->group_leader; 344 struct perf_event *sibling, *leader = event->group_leader;
345 struct pmu_hw_events fake_pmu; 345 struct pmu_hw_events fake_pmu;
346 DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
346 347
347 memset(&fake_pmu, 0, sizeof(fake_pmu)); 348 /*
349 * Initialise the fake PMU. We only need to populate the
350 * used_mask for the purposes of validation.
351 */
352 memset(fake_used_mask, 0, sizeof(fake_used_mask));
353 fake_pmu.used_mask = fake_used_mask;
348 354
349 if (!validate_event(&fake_pmu, leader)) 355 if (!validate_event(&fake_pmu, leader))
350 return -ENOSPC; 356 return -EINVAL;
351 357
352 list_for_each_entry(sibling, &leader->sibling_list, group_entry) { 358 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
353 if (!validate_event(&fake_pmu, sibling)) 359 if (!validate_event(&fake_pmu, sibling))
354 return -ENOSPC; 360 return -EINVAL;
355 } 361 }
356 362
357 if (!validate_event(&fake_pmu, event)) 363 if (!validate_event(&fake_pmu, event))
358 return -ENOSPC; 364 return -EINVAL;
359 365
360 return 0; 366 return 0;
361} 367}
@@ -396,6 +402,9 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
396 int i, err, irq, irqs; 402 int i, err, irq, irqs;
397 struct platform_device *pmu_device = armpmu->plat_device; 403 struct platform_device *pmu_device = armpmu->plat_device;
398 404
405 if (!pmu_device)
406 return -ENODEV;
407
399 err = reserve_pmu(armpmu->type); 408 err = reserve_pmu(armpmu->type);
400 if (err) { 409 if (err) {
401 pr_warning("unable to reserve pmu\n"); 410 pr_warning("unable to reserve pmu\n");
@@ -631,6 +640,9 @@ static struct platform_device_id armpmu_plat_device_ids[] = {
631 640
632static int __devinit armpmu_device_probe(struct platform_device *pdev) 641static int __devinit armpmu_device_probe(struct platform_device *pdev)
633{ 642{
643 if (!cpu_pmu)
644 return -ENODEV;
645
634 cpu_pmu->plat_device = pdev; 646 cpu_pmu->plat_device = pdev;
635 return 0; 647 return 0;
636} 648}
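
In the validate_group() hunk above, the fake PMU used for group validation now only carries a locally declared used_mask bitmap, and an unschedulable group is reported as -EINVAL rather than -ENOSPC. The sketch below is a simplified, stand-alone rendering of that idea; the struct, counter count and placement policy are invented for illustration and are not the kernel's armpmu interface.

/* Simplified sketch of "validate a group against a fake PMU": claim a
 * counter from a scratch bitmap for every event in the group; if any
 * event cannot be placed, the whole group can never be scheduled.
 * NUM_COUNTERS, struct fake_pmu and place_event() are illustrative only.
 */
#include <errno.h>
#include <stdbool.h>
#include <string.h>

#define NUM_COUNTERS 4

struct fake_pmu {
	unsigned long used_mask;          /* one bit per hardware counter */
};

static bool place_event(struct fake_pmu *pmu)
{
	for (int idx = 0; idx < NUM_COUNTERS; idx++) {
		if (!(pmu->used_mask & (1UL << idx))) {
			pmu->used_mask |= 1UL << idx;   /* claim it */
			return true;
		}
	}
	return false;                             /* no free counter */
}

static int validate_group(int nr_group_events)
{
	struct fake_pmu fake;

	memset(&fake, 0, sizeof(fake));           /* nothing in use yet */

	for (int i = 0; i < nr_group_events; i++)
		if (!place_event(&fake))
			return -EINVAL;           /* was -ENOSPC before the patch */

	return 0;
}
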
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2c3407ee8576..2334bf8a650a 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -33,3 +33,4 @@ release_pmu(enum arm_pmu_type type)
33{ 33{
34 clear_bit_unlock(type, pmu_lock); 34 clear_bit_unlock(type, pmu_lock);
35} 35}
36EXPORT_SYMBOL_GPL(release_pmu);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 75316f0dd02a..3d0c6fb74ae4 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -192,6 +192,9 @@ void cpu_idle(void)
192#endif 192#endif
193 193
194 local_irq_disable(); 194 local_irq_disable();
195#ifdef CONFIG_PL310_ERRATA_769419
196 wmb();
197#endif
195 if (hlt_counter) { 198 if (hlt_counter) {
196 local_irq_enable(); 199 local_irq_enable();
197 cpu_relax(); 200 cpu_relax();
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 7e7977ab994f..8fc2c8fcbdc6 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -461,8 +461,10 @@ static void __init setup_processor(void)
461 cpu_name, read_cpuid_id(), read_cpuid_id() & 15, 461 cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
462 proc_arch[cpu_architecture()], cr_alignment); 462 proc_arch[cpu_architecture()], cr_alignment);
463 463
464 sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS); 464 snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
465 sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS); 465 list->arch_name, ENDIANNESS);
466 snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
467 list->elf_name, ENDIANNESS);
466 elf_hwcap = list->elf_hwcap; 468 elf_hwcap = list->elf_hwcap;
467#ifndef CONFIG_ARM_THUMB 469#ifndef CONFIG_ARM_THUMB
468 elf_hwcap &= ~HWCAP_THUMB; 470 elf_hwcap &= ~HWCAP_THUMB;
@@ -893,8 +895,6 @@ void __init setup_arch(char **cmdline_p)
893{ 895{
894 struct machine_desc *mdesc; 896 struct machine_desc *mdesc;
895 897
896 unwind_init();
897
898 setup_processor(); 898 setup_processor();
899 mdesc = setup_machine_fdt(__atags_pointer); 899 mdesc = setup_machine_fdt(__atags_pointer);
900 if (!mdesc) 900 if (!mdesc)
@@ -902,6 +902,12 @@ void __init setup_arch(char **cmdline_p)
902 machine_desc = mdesc; 902 machine_desc = mdesc;
903 machine_name = mdesc->name; 903 machine_name = mdesc->name;
904 904
905#ifdef CONFIG_ZONE_DMA
906 if (mdesc->dma_zone_size) {
907 extern unsigned long arm_dma_zone_size;
908 arm_dma_zone_size = mdesc->dma_zone_size;
909 }
910#endif
905 if (mdesc->soft_reboot) 911 if (mdesc->soft_reboot)
906 reboot_setup("s"); 912 reboot_setup("s");
907 913
@@ -932,12 +938,6 @@ void __init setup_arch(char **cmdline_p)
932 938
933 tcm_init(); 939 tcm_init();
934 940
935#ifdef CONFIG_ZONE_DMA
936 if (mdesc->dma_zone_size) {
937 extern unsigned long arm_dma_zone_size;
938 arm_dma_zone_size = mdesc->dma_zone_size;
939 }
940#endif
941#ifdef CONFIG_MULTI_IRQ_HANDLER 941#ifdef CONFIG_MULTI_IRQ_HANDLER
942 handle_arch_irq = mdesc->handle_irq; 942 handle_arch_irq = mdesc->handle_irq;
943#endif 943#endif
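
The setup_processor() hunk above replaces two sprintf() calls with bounded snprintf() so an over-long architecture or ELF platform name can no longer overrun the destination buffers. A tiny hedged sketch of the same pattern follows; the buffer name and size are placeholders, not the kernel's.

/* Sketch of the sprintf() -> snprintf() change: output is truncated to
 * the buffer size (including the NUL) instead of overflowing.
 * 'machine' stands in for init_utsname()->machine; 65 mirrors
 * __NEW_UTS_LEN + 1 but is only illustrative here.
 */
#include <stdio.h>

static char machine[65];

static void set_machine(const char *arch_name, char endianness)
{
	snprintf(machine, sizeof(machine), "%s%c", arch_name, endianness);
}
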
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 1040c00405d0..8200deaa14f6 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -43,7 +43,7 @@
43 43
44struct cputopo_arm cpu_topology[NR_CPUS]; 44struct cputopo_arm cpu_topology[NR_CPUS];
45 45
46const struct cpumask *cpu_coregroup_mask(unsigned int cpu) 46const struct cpumask *cpu_coregroup_mask(int cpu)
47{ 47{
48 return &cpu_topology[cpu].core_sibling; 48 return &cpu_topology[cpu].core_sibling;
49} 49}
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index e7e8365795c3..00df012c4678 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
67 67
68struct unwind_ctrl_block { 68struct unwind_ctrl_block {
69 unsigned long vrs[16]; /* virtual register set */ 69 unsigned long vrs[16]; /* virtual register set */
70 unsigned long *insn; /* pointer to the current instructions word */ 70 const unsigned long *insn; /* pointer to the current instructions word */
71 int entries; /* number of entries left to interpret */ 71 int entries; /* number of entries left to interpret */
72 int byte; /* current byte number in the instructions word */ 72 int byte; /* current byte number in the instructions word */
73}; 73};
@@ -83,8 +83,9 @@ enum regs {
83 PC = 15 83 PC = 15
84}; 84};
85 85
86extern struct unwind_idx __start_unwind_idx[]; 86extern const struct unwind_idx __start_unwind_idx[];
87extern struct unwind_idx __stop_unwind_idx[]; 87static const struct unwind_idx *__origin_unwind_idx;
88extern const struct unwind_idx __stop_unwind_idx[];
88 89
89static DEFINE_SPINLOCK(unwind_lock); 90static DEFINE_SPINLOCK(unwind_lock);
90static LIST_HEAD(unwind_tables); 91static LIST_HEAD(unwind_tables);
@@ -98,45 +99,99 @@ static LIST_HEAD(unwind_tables);
98}) 99})
99 100
100/* 101/*
101 * Binary search in the unwind index. The entries entries are 102 * Binary search in the unwind index. The entries are
102 * guaranteed to be sorted in ascending order by the linker. 103 * guaranteed to be sorted in ascending order by the linker.
104 *
105 * start = first entry
106 * origin = first entry with positive offset (or stop if there is no such entry)
107 * stop - 1 = last entry
103 */ 108 */
104static struct unwind_idx *search_index(unsigned long addr, 109static const struct unwind_idx *search_index(unsigned long addr,
105 struct unwind_idx *first, 110 const struct unwind_idx *start,
106 struct unwind_idx *last) 111 const struct unwind_idx *origin,
112 const struct unwind_idx *stop)
107{ 113{
108 pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last); 114 unsigned long addr_prel31;
115
116 pr_debug("%s(%08lx, %p, %p, %p)\n",
117 __func__, addr, start, origin, stop);
118
119 /*
120 * only search in the section with the matching sign. This way the
121 * prel31 numbers can be compared as unsigned longs.
122 */
123 if (addr < (unsigned long)start)
124 /* negative offsets: [start; origin) */
125 stop = origin;
126 else
127 /* positive offsets: [origin; stop) */
128 start = origin;
129
130 /* prel31 for address relavive to start */
131 addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
109 132
110 if (addr < first->addr) { 133 while (start < stop - 1) {
134 const struct unwind_idx *mid = start + ((stop - start) >> 1);
135
136 /*
137 * As addr_prel31 is relative to start an offset is needed to
138 * make it relative to mid.
139 */
140 if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
141 mid->addr_offset)
142 stop = mid;
143 else {
144 /* keep addr_prel31 relative to start */
145 addr_prel31 -= ((unsigned long)mid -
146 (unsigned long)start);
147 start = mid;
148 }
149 }
150
151 if (likely(start->addr_offset <= addr_prel31))
152 return start;
153 else {
111 pr_warning("unwind: Unknown symbol address %08lx\n", addr); 154 pr_warning("unwind: Unknown symbol address %08lx\n", addr);
112 return NULL; 155 return NULL;
113 } else if (addr >= last->addr) 156 }
114 return last; 157}
115 158
116 while (first < last - 1) { 159static const struct unwind_idx *unwind_find_origin(
117 struct unwind_idx *mid = first + ((last - first + 1) >> 1); 160 const struct unwind_idx *start, const struct unwind_idx *stop)
161{
162 pr_debug("%s(%p, %p)\n", __func__, start, stop);
163 while (start < stop) {
164 const struct unwind_idx *mid = start + ((stop - start) >> 1);
118 165
119 if (addr < mid->addr) 166 if (mid->addr_offset >= 0x40000000)
120 last = mid; 167 /* negative offset */
168 start = mid + 1;
121 else 169 else
122 first = mid; 170 /* positive offset */
171 stop = mid;
123 } 172 }
124 173 pr_debug("%s -> %p\n", __func__, stop);
125 return first; 174 return stop;
126} 175}
127 176
128static struct unwind_idx *unwind_find_idx(unsigned long addr) 177static const struct unwind_idx *unwind_find_idx(unsigned long addr)
129{ 178{
130 struct unwind_idx *idx = NULL; 179 const struct unwind_idx *idx = NULL;
131 unsigned long flags; 180 unsigned long flags;
132 181
133 pr_debug("%s(%08lx)\n", __func__, addr); 182 pr_debug("%s(%08lx)\n", __func__, addr);
134 183
135 if (core_kernel_text(addr)) 184 if (core_kernel_text(addr)) {
185 if (unlikely(!__origin_unwind_idx))
186 __origin_unwind_idx =
187 unwind_find_origin(__start_unwind_idx,
188 __stop_unwind_idx);
189
136 /* main unwind table */ 190 /* main unwind table */
137 idx = search_index(addr, __start_unwind_idx, 191 idx = search_index(addr, __start_unwind_idx,
138 __stop_unwind_idx - 1); 192 __origin_unwind_idx,
139 else { 193 __stop_unwind_idx);
194 } else {
140 /* module unwind tables */ 195 /* module unwind tables */
141 struct unwind_table *table; 196 struct unwind_table *table;
142 197
@@ -145,7 +200,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
145 if (addr >= table->begin_addr && 200 if (addr >= table->begin_addr &&
146 addr < table->end_addr) { 201 addr < table->end_addr) {
147 idx = search_index(addr, table->start, 202 idx = search_index(addr, table->start,
148 table->stop - 1); 203 table->origin,
204 table->stop);
149 /* Move-to-front to exploit common traces */ 205 /* Move-to-front to exploit common traces */
150 list_move(&table->list, &unwind_tables); 206 list_move(&table->list, &unwind_tables);
151 break; 207 break;
@@ -274,7 +330,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
274int unwind_frame(struct stackframe *frame) 330int unwind_frame(struct stackframe *frame)
275{ 331{
276 unsigned long high, low; 332 unsigned long high, low;
277 struct unwind_idx *idx; 333 const struct unwind_idx *idx;
278 struct unwind_ctrl_block ctrl; 334 struct unwind_ctrl_block ctrl;
279 335
280 /* only go to a higher address on the stack */ 336 /* only go to a higher address on the stack */
@@ -399,7 +455,6 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
399 unsigned long text_size) 455 unsigned long text_size)
400{ 456{
401 unsigned long flags; 457 unsigned long flags;
402 struct unwind_idx *idx;
403 struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL); 458 struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
404 459
405 pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size, 460 pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
@@ -408,15 +463,12 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
408 if (!tab) 463 if (!tab)
409 return tab; 464 return tab;
410 465
411 tab->start = (struct unwind_idx *)start; 466 tab->start = (const struct unwind_idx *)start;
412 tab->stop = (struct unwind_idx *)(start + size); 467 tab->stop = (const struct unwind_idx *)(start + size);
468 tab->origin = unwind_find_origin(tab->start, tab->stop);
413 tab->begin_addr = text_addr; 469 tab->begin_addr = text_addr;
414 tab->end_addr = text_addr + text_size; 470 tab->end_addr = text_addr + text_size;
415 471
416 /* Convert the symbol addresses to absolute values */
417 for (idx = tab->start; idx < tab->stop; idx++)
418 idx->addr = prel31_to_addr(&idx->addr);
419
420 spin_lock_irqsave(&unwind_lock, flags); 472 spin_lock_irqsave(&unwind_lock, flags);
421 list_add_tail(&tab->list, &unwind_tables); 473 list_add_tail(&tab->list, &unwind_tables);
422 spin_unlock_irqrestore(&unwind_lock, flags); 474 spin_unlock_irqrestore(&unwind_lock, flags);
@@ -437,16 +489,3 @@ void unwind_table_del(struct unwind_table *tab)
437 489
438 kfree(tab); 490 kfree(tab);
439} 491}
440
441int __init unwind_init(void)
442{
443 struct unwind_idx *idx;
444
445 /* Convert the symbol addresses to absolute values */
446 for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
447 idx->addr = prel31_to_addr(&idx->addr);
448
449 pr_debug("unwind: ARM stack unwinding initialised\n");
450
451 return 0;
452}
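
The unwind rework above stops converting the index entries to absolute addresses at boot and module-load time (the table may sit in read-only memory) and instead keeps the prel31 place-relative offsets as stored, locates the "origin" entry where the sign flips once, and restricts every search_index() call to the half whose sign matches the address being looked up. For reference, here is a hedged sketch of the prel31 decoding that the removed conversion loop and the comparisons rely on; the in-tree helper is a macro, and this stand-alone function only illustrates the arithmetic.

/* Illustrative decoding of a prel31 entry: the stored word is a 31-bit
 * signed offset relative to the word's own location, so entries whose
 * target lies below the index table look "negative" and sort before
 * entries whose target lies above it -- the boundary is the origin.
 */
static unsigned long prel31_to_addr_sketch(const unsigned long *ptr)
{
	/* sign-extend bit 30 up to bit 31 */
	long offset = (((long)*ptr) << 1) >> 1;

	/* then make the offset place-relative */
	return (unsigned long)ptr + offset;
}
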
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 10d868a5a481..d6408d1ee543 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,5 +1,9 @@
1#include <asm/unwind.h>
2
1#if __LINUX_ARM_ARCH__ >= 6 3#if __LINUX_ARM_ARCH__ >= 6
2 .macro bitop, instr 4 .macro bitop, name, instr
5ENTRY( \name )
6UNWIND( .fnstart )
3 ands ip, r1, #3 7 ands ip, r1, #3
4 strneb r1, [ip] @ assert word-aligned 8 strneb r1, [ip] @ assert word-aligned
5 mov r2, #1 9 mov r2, #1
@@ -13,9 +17,13 @@
13 cmp r0, #0 17 cmp r0, #0
14 bne 1b 18 bne 1b
15 bx lr 19 bx lr
20UNWIND( .fnend )
21ENDPROC(\name )
16 .endm 22 .endm
17 23
18 .macro testop, instr, store 24 .macro testop, name, instr, store
25ENTRY( \name )
26UNWIND( .fnstart )
19 ands ip, r1, #3 27 ands ip, r1, #3
20 strneb r1, [ip] @ assert word-aligned 28 strneb r1, [ip] @ assert word-aligned
21 mov r2, #1 29 mov r2, #1
@@ -34,9 +42,13 @@
34 cmp r0, #0 42 cmp r0, #0
35 movne r0, #1 43 movne r0, #1
362: bx lr 442: bx lr
45UNWIND( .fnend )
46ENDPROC(\name )
37 .endm 47 .endm
38#else 48#else
39 .macro bitop, instr 49 .macro bitop, name, instr
50ENTRY( \name )
51UNWIND( .fnstart )
40 ands ip, r1, #3 52 ands ip, r1, #3
41 strneb r1, [ip] @ assert word-aligned 53 strneb r1, [ip] @ assert word-aligned
42 and r2, r0, #31 54 and r2, r0, #31
@@ -49,6 +61,8 @@
49 str r2, [r1, r0, lsl #2] 61 str r2, [r1, r0, lsl #2]
50 restore_irqs ip 62 restore_irqs ip
51 mov pc, lr 63 mov pc, lr
64UNWIND( .fnend )
65ENDPROC(\name )
52 .endm 66 .endm
53 67
54/** 68/**
@@ -59,7 +73,9 @@
59 * Note: we can trivially conditionalise the store instruction 73 * Note: we can trivially conditionalise the store instruction
60 * to avoid dirtying the data cache. 74 * to avoid dirtying the data cache.
61 */ 75 */
62 .macro testop, instr, store 76 .macro testop, name, instr, store
77ENTRY( \name )
78UNWIND( .fnstart )
63 ands ip, r1, #3 79 ands ip, r1, #3
64 strneb r1, [ip] @ assert word-aligned 80 strneb r1, [ip] @ assert word-aligned
65 and r3, r0, #31 81 and r3, r0, #31
@@ -73,5 +89,7 @@
73 moveq r0, #0 89 moveq r0, #0
74 restore_irqs ip 90 restore_irqs ip
75 mov pc, lr 91 mov pc, lr
92UNWIND( .fnend )
93ENDPROC(\name )
76 .endm 94 .endm
77#endif 95#endif
diff --git a/arch/arm/lib/changebit.S b/arch/arm/lib/changebit.S
index 68ed5b62e839..f4027862172f 100644
--- a/arch/arm/lib/changebit.S
+++ b/arch/arm/lib/changebit.S
@@ -12,6 +12,4 @@
12#include "bitops.h" 12#include "bitops.h"
13 .text 13 .text
14 14
15ENTRY(_change_bit) 15bitop _change_bit, eor
16 bitop eor
17ENDPROC(_change_bit)
diff --git a/arch/arm/lib/clearbit.S b/arch/arm/lib/clearbit.S
index 4c04c3b51eeb..f6b75fb64d30 100644
--- a/arch/arm/lib/clearbit.S
+++ b/arch/arm/lib/clearbit.S
@@ -12,6 +12,4 @@
12#include "bitops.h" 12#include "bitops.h"
13 .text 13 .text
14 14
15ENTRY(_clear_bit) 15bitop _clear_bit, bic
16 bitop bic
17ENDPROC(_clear_bit)
diff --git a/arch/arm/lib/setbit.S b/arch/arm/lib/setbit.S
index bbee5c66a23e..618fedae4b37 100644
--- a/arch/arm/lib/setbit.S
+++ b/arch/arm/lib/setbit.S
@@ -12,6 +12,4 @@
12#include "bitops.h" 12#include "bitops.h"
13 .text 13 .text
14 14
15ENTRY(_set_bit) 15bitop _set_bit, orr
16 bitop orr
17ENDPROC(_set_bit)
diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S
index 15a4d431f229..4becdc3a59cb 100644
--- a/arch/arm/lib/testchangebit.S
+++ b/arch/arm/lib/testchangebit.S
@@ -12,6 +12,4 @@
12#include "bitops.h" 12#include "bitops.h"
13 .text 13 .text
14 14
15ENTRY(_test_and_change_bit) 15testop _test_and_change_bit, eor, str
16 testop eor, str
17ENDPROC(_test_and_change_bit)
diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S
index 521b66b5b95d..918841dcce7a 100644
--- a/arch/arm/lib/testclearbit.S
+++ b/arch/arm/lib/testclearbit.S
@@ -12,6 +12,4 @@
12#include "bitops.h" 12#include "bitops.h"
13 .text 13 .text
14 14
15ENTRY(_test_and_clear_bit) 15testop _test_and_clear_bit, bicne, strne
16 testop bicne, strne
17ENDPROC(_test_and_clear_bit)
diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S
index 1c98cc2185bb..8d1b2fe9e487 100644
--- a/arch/arm/lib/testsetbit.S
+++ b/arch/arm/lib/testsetbit.S
@@ -12,6 +12,4 @@
12#include "bitops.h" 12#include "bitops.h"
13 .text 13 .text
14 14
15ENTRY(_test_and_set_bit) 15testop _test_and_set_bit, orreq, streq
16 testop orreq, streq
17ENDPROC(_test_and_set_bit)
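
After the bitops.h change, each of the lib/*bit.S files above collapses to a single macro invocation: the function name is now a macro argument, and ENTRY()/ENDPROC() plus the new UNWIND annotations are emitted inside bitop/testop themselves. The C sketch below is only an analogy for that "the macro generates the whole function" structure; it deliberately ignores the atomicity and IRQ handling of the real assembly, and every name in it is made up.

/* C analogy (hypothetical, non-atomic) of the refactor: the generator
 * macro takes the function name and emits the complete function, so each
 * "file" becomes a one-line instantiation, mirroring
 * "bitop _set_bit, orr" and "bitop _change_bit, eor" above.
 */
#define DEFINE_BITOP(name, op)                                          \
	void name(unsigned int nr, unsigned long *addr)                  \
	{                                                                \
		unsigned long mask = 1UL << (nr & 31);                   \
		unsigned long *word = addr + (nr >> 5);                  \
									 \
		*word = *word op mask;  /* only the operator differs */  \
	}

DEFINE_BITOP(sketch_set_bit, |)
DEFINE_BITOP(sketch_change_bit, ^)
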
diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c
index a4401d6b5b07..adad70db70eb 100644
--- a/arch/arm/mach-at91/at91cap9_devices.c
+++ b/arch/arm/mach-at91/at91cap9_devices.c
@@ -98,7 +98,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
98 * USB HS Device (Gadget) 98 * USB HS Device (Gadget)
99 * -------------------------------------------------------------------- */ 99 * -------------------------------------------------------------------- */
100 100
101#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE) 101#if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE)
102 102
103static struct resource usba_udc_resources[] = { 103static struct resource usba_udc_resources[] = {
104 [0] = { 104 [0] = {
@@ -1021,8 +1021,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
1021#if defined(CONFIG_SERIAL_ATMEL) 1021#if defined(CONFIG_SERIAL_ATMEL)
1022static struct resource dbgu_resources[] = { 1022static struct resource dbgu_resources[] = {
1023 [0] = { 1023 [0] = {
1024 .start = AT91_VA_BASE_SYS + AT91_DBGU, 1024 .start = AT91_BASE_SYS + AT91_DBGU,
1025 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 1025 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
1026 .flags = IORESOURCE_MEM, 1026 .flags = IORESOURCE_MEM,
1027 }, 1027 },
1028 [1] = { 1028 [1] = {
@@ -1035,7 +1035,6 @@ static struct resource dbgu_resources[] = {
1035static struct atmel_uart_data dbgu_data = { 1035static struct atmel_uart_data dbgu_data = {
1036 .use_dma_tx = 0, 1036 .use_dma_tx = 0,
1037 .use_dma_rx = 0, /* DBGU not capable of receive DMA */ 1037 .use_dma_rx = 0, /* DBGU not capable of receive DMA */
1038 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
1039}; 1038};
1040 1039
1041static u64 dbgu_dmamask = DMA_BIT_MASK(32); 1040static u64 dbgu_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 01d8bbd1468b..ad930688358c 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
83 * USB Device (Gadget) 83 * USB Device (Gadget)
84 * -------------------------------------------------------------------- */ 84 * -------------------------------------------------------------------- */
85 85
86#ifdef CONFIG_USB_GADGET_AT91 86#ifdef CONFIG_USB_AT91
87static struct at91_udc_data udc_data; 87static struct at91_udc_data udc_data;
88 88
89static struct resource udc_resources[] = { 89static struct resource udc_resources[] = {
@@ -877,8 +877,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
877#if defined(CONFIG_SERIAL_ATMEL) 877#if defined(CONFIG_SERIAL_ATMEL)
878static struct resource dbgu_resources[] = { 878static struct resource dbgu_resources[] = {
879 [0] = { 879 [0] = {
880 .start = AT91_VA_BASE_SYS + AT91_DBGU, 880 .start = AT91_BASE_SYS + AT91_DBGU,
881 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 881 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
882 .flags = IORESOURCE_MEM, 882 .flags = IORESOURCE_MEM,
883 }, 883 },
884 [1] = { 884 [1] = {
@@ -891,7 +891,6 @@ static struct resource dbgu_resources[] = {
891static struct atmel_uart_data dbgu_data = { 891static struct atmel_uart_data dbgu_data = {
892 .use_dma_tx = 0, 892 .use_dma_tx = 0,
893 .use_dma_rx = 0, /* DBGU not capable of receive DMA */ 893 .use_dma_rx = 0, /* DBGU not capable of receive DMA */
894 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
895}; 894};
896 895
897static u64 dbgu_dmamask = DMA_BIT_MASK(32); 896static u64 dbgu_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index b84a9f642f59..0d20677fbef0 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -195,9 +195,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
195 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk), 195 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
196 CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk), 196 CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
197 CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk), 197 CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
198 CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk), 198 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
199 CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk), 199 CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
200 CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk), 200 CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
201 CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk), 201 CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
202 /* more usart lookup table for DT entries */ 202 /* more usart lookup table for DT entries */
203 CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck), 203 CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 24b6f8c0440d..629fa9774972 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
84 * USB Device (Gadget) 84 * USB Device (Gadget)
85 * -------------------------------------------------------------------- */ 85 * -------------------------------------------------------------------- */
86 86
87#ifdef CONFIG_USB_GADGET_AT91 87#ifdef CONFIG_USB_AT91
88static struct at91_udc_data udc_data; 88static struct at91_udc_data udc_data;
89 89
90static struct resource udc_resources[] = { 90static struct resource udc_resources[] = {
@@ -837,8 +837,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
837#if defined(CONFIG_SERIAL_ATMEL) 837#if defined(CONFIG_SERIAL_ATMEL)
838static struct resource dbgu_resources[] = { 838static struct resource dbgu_resources[] = {
839 [0] = { 839 [0] = {
840 .start = AT91_VA_BASE_SYS + AT91_DBGU, 840 .start = AT91_BASE_SYS + AT91_DBGU,
841 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 841 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
842 .flags = IORESOURCE_MEM, 842 .flags = IORESOURCE_MEM,
843 }, 843 },
844 [1] = { 844 [1] = {
@@ -851,7 +851,6 @@ static struct resource dbgu_resources[] = {
851static struct atmel_uart_data dbgu_data = { 851static struct atmel_uart_data dbgu_data = {
852 .use_dma_tx = 0, 852 .use_dma_tx = 0,
853 .use_dma_rx = 0, /* DBGU not capable of receive DMA */ 853 .use_dma_rx = 0, /* DBGU not capable of receive DMA */
854 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
855}; 854};
856 855
857static u64 dbgu_dmamask = DMA_BIT_MASK(32); 856static u64 dbgu_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 3b70b3897d95..a178b58b0b9c 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
87 * USB Device (Gadget) 87 * USB Device (Gadget)
88 * -------------------------------------------------------------------- */ 88 * -------------------------------------------------------------------- */
89 89
90#ifdef CONFIG_USB_GADGET_AT91 90#ifdef CONFIG_USB_AT91
91static struct at91_udc_data udc_data; 91static struct at91_udc_data udc_data;
92 92
93static struct resource udc_resources[] = { 93static struct resource udc_resources[] = {
@@ -816,8 +816,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
816#if defined(CONFIG_SERIAL_ATMEL) 816#if defined(CONFIG_SERIAL_ATMEL)
817static struct resource dbgu_resources[] = { 817static struct resource dbgu_resources[] = {
818 [0] = { 818 [0] = {
819 .start = AT91_VA_BASE_SYS + AT91_DBGU, 819 .start = AT91_BASE_SYS + AT91_DBGU,
820 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 820 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
821 .flags = IORESOURCE_MEM, 821 .flags = IORESOURCE_MEM,
822 }, 822 },
823 [1] = { 823 [1] = {
@@ -830,7 +830,6 @@ static struct resource dbgu_resources[] = {
830static struct atmel_uart_data dbgu_data = { 830static struct atmel_uart_data dbgu_data = {
831 .use_dma_tx = 0, 831 .use_dma_tx = 0,
832 .use_dma_rx = 0, /* DBGU not capable of receive DMA */ 832 .use_dma_rx = 0, /* DBGU not capable of receive DMA */
833 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
834}; 833};
835 834
836static u64 dbgu_dmamask = DMA_BIT_MASK(32); 835static u64 dbgu_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index 3faa1fde9ad9..d5fbac9ff4fa 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
92 * USB Device (Gadget) 92 * USB Device (Gadget)
93 * -------------------------------------------------------------------- */ 93 * -------------------------------------------------------------------- */
94 94
95#ifdef CONFIG_USB_GADGET_AT91 95#ifdef CONFIG_USB_AT91
96static struct at91_udc_data udc_data; 96static struct at91_udc_data udc_data;
97 97
98static struct resource udc_resources[] = { 98static struct resource udc_resources[] = {
@@ -1196,8 +1196,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
1196 1196
1197static struct resource dbgu_resources[] = { 1197static struct resource dbgu_resources[] = {
1198 [0] = { 1198 [0] = {
1199 .start = AT91_VA_BASE_SYS + AT91_DBGU, 1199 .start = AT91_BASE_SYS + AT91_DBGU,
1200 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 1200 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
1201 .flags = IORESOURCE_MEM, 1201 .flags = IORESOURCE_MEM,
1202 }, 1202 },
1203 [1] = { 1203 [1] = {
@@ -1210,7 +1210,6 @@ static struct resource dbgu_resources[] = {
1210static struct atmel_uart_data dbgu_data = { 1210static struct atmel_uart_data dbgu_data = {
1211 .use_dma_tx = 0, 1211 .use_dma_tx = 0,
1212 .use_dma_rx = 0, /* DBGU not capable of receive DMA */ 1212 .use_dma_rx = 0, /* DBGU not capable of receive DMA */
1213 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
1214}; 1213};
1215 1214
1216static u64 dbgu_dmamask = DMA_BIT_MASK(32); 1215static u64 dbgu_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 000b5e1da965..09a16d6bd5cd 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -197,7 +197,7 @@ void __init at91_add_device_usbh_ehci(struct at91_usbh_data *data) {}
197 * USB HS Device (Gadget) 197 * USB HS Device (Gadget)
198 * -------------------------------------------------------------------- */ 198 * -------------------------------------------------------------------- */
199 199
200#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE) 200#if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE)
201static struct resource usba_udc_resources[] = { 201static struct resource usba_udc_resources[] = {
202 [0] = { 202 [0] = {
203 .start = AT91SAM9G45_UDPHS_FIFO, 203 .start = AT91SAM9G45_UDPHS_FIFO,
@@ -1332,8 +1332,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
1332#if defined(CONFIG_SERIAL_ATMEL) 1332#if defined(CONFIG_SERIAL_ATMEL)
1333static struct resource dbgu_resources[] = { 1333static struct resource dbgu_resources[] = {
1334 [0] = { 1334 [0] = {
1335 .start = AT91_VA_BASE_SYS + AT91_DBGU, 1335 .start = AT91_BASE_SYS + AT91_DBGU,
1336 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 1336 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
1337 .flags = IORESOURCE_MEM, 1337 .flags = IORESOURCE_MEM,
1338 }, 1338 },
1339 [1] = { 1339 [1] = {
@@ -1346,7 +1346,6 @@ static struct resource dbgu_resources[] = {
1346static struct atmel_uart_data dbgu_data = { 1346static struct atmel_uart_data dbgu_data = {
1347 .use_dma_tx = 0, 1347 .use_dma_tx = 0,
1348 .use_dma_rx = 0, 1348 .use_dma_rx = 0,
1349 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
1350}; 1349};
1351 1350
1352static u64 dbgu_dmamask = DMA_BIT_MASK(32); 1351static u64 dbgu_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index 305a851b5bff..628eb566d60c 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -75,7 +75,7 @@ void __init at91_add_device_hdmac(void) {}
75 * USB HS Device (Gadget) 75 * USB HS Device (Gadget)
76 * -------------------------------------------------------------------- */ 76 * -------------------------------------------------------------------- */
77 77
78#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE) 78#if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE)
79 79
80static struct resource usba_udc_resources[] = { 80static struct resource usba_udc_resources[] = {
81 [0] = { 81 [0] = {
@@ -908,8 +908,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
908#if defined(CONFIG_SERIAL_ATMEL) 908#if defined(CONFIG_SERIAL_ATMEL)
909static struct resource dbgu_resources[] = { 909static struct resource dbgu_resources[] = {
910 [0] = { 910 [0] = {
911 .start = AT91_VA_BASE_SYS + AT91_DBGU, 911 .start = AT91_BASE_SYS + AT91_DBGU,
912 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 912 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
913 .flags = IORESOURCE_MEM, 913 .flags = IORESOURCE_MEM,
914 }, 914 },
915 [1] = { 915 [1] = {
@@ -922,7 +922,6 @@ static struct resource dbgu_resources[] = {
922static struct atmel_uart_data dbgu_data = { 922static struct atmel_uart_data dbgu_data = {
923 .use_dma_tx = 0, 923 .use_dma_tx = 0,
924 .use_dma_rx = 0, /* DBGU not capable of receive DMA */ 924 .use_dma_rx = 0, /* DBGU not capable of receive DMA */
925 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
926}; 925};
927 926
928static u64 dbgu_dmamask = DMA_BIT_MASK(32); 927static u64 dbgu_dmamask = DMA_BIT_MASK(32);
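
The AT91 device-file hunks above all make the same two moves: the UDC sections now key off the renamed CONFIG_USB_AT91 option, and the DBGU resource switches from a pre-mapped virtual address (AT91_VA_BASE_SYS) to the physical base (AT91_BASE_SYS), dropping the .regs pointer from the platform data, presumably leaving the mapping to the serial driver. A minimal user-space sketch of the resulting convention, with illustrative addresses rather than the real AT91 ones:

    #include <stdint.h>
    #include <stdio.h>

    struct fake_resource {
            uint32_t start;                 /* physical base of the window */
            uint32_t end;
            const char *name;
    };

    #define SZ_512        0x200u
    #define AT91_BASE_SYS 0xffffe800u       /* illustrative value only */
    #define AT91_DBGU     0x0600u           /* illustrative offset only */

    static struct fake_resource dbgu_res = {
            .start = AT91_BASE_SYS + AT91_DBGU,
            .end   = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
            .name  = "dbgu",
    };

    int main(void)
    {
            /* In the kernel the driver would ioremap() this range itself;
             * the board code only advertises the physical window. */
            printf("%s: %#x-%#x\n", dbgu_res.name,
                   (unsigned)dbgu_res.start, (unsigned)dbgu_res.end);
            return 0;
    }
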
diff --git a/arch/arm/mach-at91/board-yl-9200.c b/arch/arm/mach-at91/board-yl-9200.c
index 649b052231f5..12a3f955162b 100644
--- a/arch/arm/mach-at91/board-yl-9200.c
+++ b/arch/arm/mach-at91/board-yl-9200.c
@@ -384,7 +384,7 @@ static struct spi_board_info yl9200_spi_devices[] = {
384#include <video/s1d13xxxfb.h> 384#include <video/s1d13xxxfb.h>
385 385
386 386
387static void __init yl9200_init_video(void) 387static void yl9200_init_video(void)
388{ 388{
389 /* NWAIT Signal */ 389 /* NWAIT Signal */
390 at91_set_A_periph(AT91_PIN_PC6, 0); 390 at91_set_A_periph(AT91_PIN_PC6, 0);
diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h
index 8f4866045b41..ec164a4124c9 100644
--- a/arch/arm/mach-at91/include/mach/system_rev.h
+++ b/arch/arm/mach-at91/include/mach/system_rev.h
@@ -19,7 +19,7 @@
19#define BOARD_HAVE_NAND_16BIT (1 << 31) 19#define BOARD_HAVE_NAND_16BIT (1 << 31)
20static inline int board_have_nand_16bit(void) 20static inline int board_have_nand_16bit(void)
21{ 21{
22 return system_rev & BOARD_HAVE_NAND_16BIT; 22 return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
23} 23}
24 24
25#endif /* __ARCH_SYSTEM_REV_H__ */ 25#endif /* __ARCH_SYSTEM_REV_H__ */
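
The one-liner above is subtler than it looks: board_have_nand_16bit() returns int, while the flag lives in bit 31, so handing back the raw mask gives callers a value that does not fit a positive int and breaks any test against 1. Normalising to 0/1 keeps the boolean contract. A stand-alone sketch, with system_rev seeded by hand purely for illustration:

    #include <stdio.h>

    #define BOARD_HAVE_NAND_16BIT (1u << 31)

    static unsigned int system_rev = BOARD_HAVE_NAND_16BIT;

    /* Returning the raw mask would squeeze 0x80000000 into an int;
     * normalising to 0/1 keeps the result an explicit boolean. */
    static int board_have_nand_16bit(void)
    {
            return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
    }

    int main(void)
    {
            printf("16-bit NAND: %d\n", board_have_nand_16bit());
            return 0;
    }
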
diff --git a/arch/arm/mach-at91/include/mach/vmalloc.h b/arch/arm/mach-at91/include/mach/vmalloc.h
index 8eb459f3f5b7..8e4a1bd0ab1d 100644
--- a/arch/arm/mach-at91/include/mach/vmalloc.h
+++ b/arch/arm/mach-at91/include/mach/vmalloc.h
@@ -21,6 +21,8 @@
21#ifndef __ASM_ARCH_VMALLOC_H 21#ifndef __ASM_ARCH_VMALLOC_H
22#define __ASM_ARCH_VMALLOC_H 22#define __ASM_ARCH_VMALLOC_H
23 23
24#include <mach/hardware.h>
25
24#define VMALLOC_END (AT91_VIRT_BASE & PGDIR_MASK) 26#define VMALLOC_END (AT91_VIRT_BASE & PGDIR_MASK)
25 27
26#endif 28#endif
diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c
index 43eadbcc29ed..430da120a297 100644
--- a/arch/arm/mach-bcmring/core.c
+++ b/arch/arm/mach-bcmring/core.c
@@ -235,7 +235,7 @@ void __init bcmring_init_timer(void)
235 */ 235 */
236 bcmring_clocksource_init(); 236 bcmring_clocksource_init();
237 237
238 sp804_clockevents_register(TIMER0_VA_BASE, IRQ_TIMER0, "timer0"); 238 sp804_clockevents_init(TIMER0_VA_BASE, IRQ_TIMER0, "timer0");
239} 239}
240 240
241struct sys_timer bcmring_timer = { 241struct sys_timer bcmring_timer = {
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index b52b8de91bde..f4d4d6d174d0 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -36,6 +36,7 @@
36#include <linux/mm.h> 36#include <linux/mm.h>
37#include <linux/pfn.h> 37#include <linux/pfn.h>
38#include <linux/atomic.h> 38#include <linux/atomic.h>
39#include <linux/sched.h>
39#include <mach/dma.h> 40#include <mach/dma.h>
40 41
41/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */ 42/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 1d7d24995226..6659a90dbcad 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -753,7 +753,7 @@ static struct snd_platform_data da850_evm_snd_data = {
753 .num_serializer = ARRAY_SIZE(da850_iis_serializer_direction), 753 .num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
754 .tdm_slots = 2, 754 .tdm_slots = 2,
755 .serial_dir = da850_iis_serializer_direction, 755 .serial_dir = da850_iis_serializer_direction,
756 .asp_chan_q = EVENTQ_1, 756 .asp_chan_q = EVENTQ_0,
757 .version = MCASP_VERSION_2, 757 .version = MCASP_VERSION_2,
758 .txnumevt = 1, 758 .txnumevt = 1,
759 .rxnumevt = 1, 759 .rxnumevt = 1,
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 1918ae711428..46e1f4173b97 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -107,7 +107,7 @@ static struct mtd_partition davinci_nand_partitions[] = {
107 /* UBL (a few copies) plus U-Boot */ 107 /* UBL (a few copies) plus U-Boot */
108 .name = "bootloader", 108 .name = "bootloader",
109 .offset = 0, 109 .offset = 0,
110 .size = 28 * NAND_BLOCK_SIZE, 110 .size = 30 * NAND_BLOCK_SIZE,
111 .mask_flags = MTD_WRITEABLE, /* force read-only */ 111 .mask_flags = MTD_WRITEABLE, /* force read-only */
112 }, { 112 }, {
113 /* U-Boot environment */ 113 /* U-Boot environment */
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index e574d7f837a8..635bf7740157 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -564,7 +564,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
564 int val; 564 int val;
565 u32 value; 565 u32 value;
566 566
567 if (!vpif_vsclkdis_reg || !cpld_client) 567 if (!vpif_vidclkctl_reg || !cpld_client)
568 return -ENXIO; 568 return -ENXIO;
569 569
570 val = i2c_smbus_read_byte(cpld_client); 570 val = i2c_smbus_read_byte(cpld_client);
@@ -572,7 +572,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
572 return val; 572 return val;
573 573
574 spin_lock_irqsave(&vpif_reg_lock, flags); 574 spin_lock_irqsave(&vpif_reg_lock, flags);
575 value = __raw_readl(vpif_vsclkdis_reg); 575 value = __raw_readl(vpif_vidclkctl_reg);
576 if (mux_mode) { 576 if (mux_mode) {
577 val &= VPIF_INPUT_TWO_CHANNEL; 577 val &= VPIF_INPUT_TWO_CHANNEL;
578 value |= VIDCH1CLK; 578 value |= VIDCH1CLK;
@@ -580,7 +580,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
580 val |= VPIF_INPUT_ONE_CHANNEL; 580 val |= VPIF_INPUT_ONE_CHANNEL;
581 value &= ~VIDCH1CLK; 581 value &= ~VIDCH1CLK;
582 } 582 }
583 __raw_writel(value, vpif_vsclkdis_reg); 583 __raw_writel(value, vpif_vidclkctl_reg);
584 spin_unlock_irqrestore(&vpif_reg_lock, flags); 584 spin_unlock_irqrestore(&vpif_reg_lock, flags);
585 585
586 err = i2c_smbus_write_byte(cpld_client, val); 586 err = i2c_smbus_write_byte(cpld_client, val);
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 0b68ed534f8e..af27c130595f 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -161,7 +161,6 @@ static struct clk dsp_clk = {
161 .name = "dsp", 161 .name = "dsp",
162 .parent = &pll1_sysclk1, 162 .parent = &pll1_sysclk1,
163 .lpsc = DM646X_LPSC_C64X_CPU, 163 .lpsc = DM646X_LPSC_C64X_CPU,
164 .flags = PSC_DSP,
165 .usecount = 1, /* REVISIT how to disable? */ 164 .usecount = 1, /* REVISIT how to disable? */
166}; 165};
167 166
diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h
index fa59c097223d..8bc3fc256171 100644
--- a/arch/arm/mach-davinci/include/mach/psc.h
+++ b/arch/arm/mach-davinci/include/mach/psc.h
@@ -233,7 +233,7 @@
233#define PTCMD 0x120 233#define PTCMD 0x120
234#define PTSTAT 0x128 234#define PTSTAT 0x128
235#define PDSTAT 0x200 235#define PDSTAT 0x200
236#define PDCTL1 0x304 236#define PDCTL 0x300
237#define MDSTAT 0x800 237#define MDSTAT 0x800
238#define MDCTL 0xA00 238#define MDCTL 0xA00
239 239
@@ -244,7 +244,10 @@
244#define PSC_STATE_ENABLE 3 244#define PSC_STATE_ENABLE 3
245 245
246#define MDSTAT_STATE_MASK 0x3f 246#define MDSTAT_STATE_MASK 0x3f
247#define PDSTAT_STATE_MASK 0x1f
247#define MDCTL_FORCE BIT(31) 248#define MDCTL_FORCE BIT(31)
249#define PDCTL_NEXT BIT(1)
250#define PDCTL_EPCGOOD BIT(8)
248 251
249#ifndef __ASSEMBLER__ 252#ifndef __ASSEMBLER__
250 253
diff --git a/arch/arm/mach-davinci/psc.c b/arch/arm/mach-davinci/psc.c
index 1fb6bdff38c1..d7e210f4b55c 100644
--- a/arch/arm/mach-davinci/psc.c
+++ b/arch/arm/mach-davinci/psc.c
@@ -52,7 +52,7 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
52void davinci_psc_config(unsigned int domain, unsigned int ctlr, 52void davinci_psc_config(unsigned int domain, unsigned int ctlr,
53 unsigned int id, bool enable, u32 flags) 53 unsigned int id, bool enable, u32 flags)
54{ 54{
55 u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl; 55 u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl;
56 void __iomem *psc_base; 56 void __iomem *psc_base;
57 struct davinci_soc_info *soc_info = &davinci_soc_info; 57 struct davinci_soc_info *soc_info = &davinci_soc_info;
58 u32 next_state = PSC_STATE_ENABLE; 58 u32 next_state = PSC_STATE_ENABLE;
@@ -79,11 +79,11 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
79 mdctl |= MDCTL_FORCE; 79 mdctl |= MDCTL_FORCE;
80 __raw_writel(mdctl, psc_base + MDCTL + 4 * id); 80 __raw_writel(mdctl, psc_base + MDCTL + 4 * id);
81 81
82 pdstat = __raw_readl(psc_base + PDSTAT); 82 pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain);
83 if ((pdstat & 0x00000001) == 0) { 83 if ((pdstat & PDSTAT_STATE_MASK) == 0) {
84 pdctl1 = __raw_readl(psc_base + PDCTL1); 84 pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
85 pdctl1 |= 0x1; 85 pdctl |= PDCTL_NEXT;
86 __raw_writel(pdctl1, psc_base + PDCTL1); 86 __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
87 87
88 ptcmd = 1 << domain; 88 ptcmd = 1 << domain;
89 __raw_writel(ptcmd, psc_base + PTCMD); 89 __raw_writel(ptcmd, psc_base + PTCMD);
@@ -92,9 +92,9 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
92 epcpr = __raw_readl(psc_base + EPCPR); 92 epcpr = __raw_readl(psc_base + EPCPR);
93 } while ((((epcpr >> domain) & 1) == 0)); 93 } while ((((epcpr >> domain) & 1) == 0));
94 94
95 pdctl1 = __raw_readl(psc_base + PDCTL1); 95 pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
96 pdctl1 |= 0x100; 96 pdctl |= PDCTL_EPCGOOD;
97 __raw_writel(pdctl1, psc_base + PDCTL1); 97 __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
98 } else { 98 } else {
99 ptcmd = 1 << domain; 99 ptcmd = 1 << domain;
100 __raw_writel(ptcmd, psc_base + PTCMD); 100 __raw_writel(ptcmd, psc_base + PTCMD);
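
The psc.c hunk generalises the power-domain handling: PDSTAT and PDCTL are treated as per-domain register arrays (offset 4 * domain), and the magic 0x1/0x100 writes become the named PDCTL_NEXT and PDCTL_EPCGOOD bits. A compact sketch of just the addressing scheme, over a fake register block instead of the ioremapped PSC:

    #include <stdint.h>
    #include <stdio.h>

    #define PDSTAT            0x200
    #define PDCTL             0x300
    #define PDSTAT_STATE_MASK 0x1f
    #define PDCTL_NEXT        (1u << 1)

    static uint32_t psc_regs[0x100];        /* stand-in for the PSC block */

    static uint32_t psc_read(unsigned off)              { return psc_regs[off / 4]; }
    static void     psc_write(uint32_t v, unsigned off) { psc_regs[off / 4] = v; }

    static void power_up_domain(unsigned domain)
    {
            uint32_t pdstat = psc_read(PDSTAT + 4 * domain);

            if ((pdstat & PDSTAT_STATE_MASK) == 0) {    /* domain is off */
                    uint32_t pdctl = psc_read(PDCTL + 4 * domain);

                    pdctl |= PDCTL_NEXT;                /* request transition */
                    psc_write(pdctl, PDCTL + 4 * domain);
            }
    }

    int main(void)
    {
            power_up_domain(1);
            printf("PDCTL[1] = %#x\n", (unsigned)psc_read(PDCTL + 4 * 1));
            return 0;
    }
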
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 35f6502144ae..4ebb382c5979 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -12,6 +12,8 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/cpuidle.h> 13#include <linux/cpuidle.h>
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/export.h>
16#include <linux/time.h>
15 17
16#include <asm/proc-fns.h> 18#include <asm/proc-fns.h>
17 19
diff --git a/arch/arm/mach-exynos/mct.c b/arch/arm/mach-exynos/mct.c
index 97343df8f132..85b5527d0918 100644
--- a/arch/arm/mach-exynos/mct.c
+++ b/arch/arm/mach-exynos/mct.c
@@ -44,8 +44,6 @@ struct mct_clock_event_device {
44 char name[10]; 44 char name[10];
45}; 45};
46 46
47static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
48
49static void exynos4_mct_write(unsigned int value, void *addr) 47static void exynos4_mct_write(unsigned int value, void *addr)
50{ 48{
51 void __iomem *stat_addr; 49 void __iomem *stat_addr;
@@ -264,6 +262,9 @@ static void exynos4_clockevent_init(void)
264} 262}
265 263
266#ifdef CONFIG_LOCAL_TIMERS 264#ifdef CONFIG_LOCAL_TIMERS
265
266static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
267
267/* Clock event handling */ 268/* Clock event handling */
268static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt) 269static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
269{ 270{
@@ -428,9 +429,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
428 429
429void local_timer_stop(struct clock_event_device *evt) 430void local_timer_stop(struct clock_event_device *evt)
430{ 431{
432 unsigned int cpu = smp_processor_id();
431 evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); 433 evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
432 if (mct_int_type == MCT_INT_SPI) 434 if (mct_int_type == MCT_INT_SPI)
433 disable_irq(evt->irq); 435 if (cpu == 0)
436 remove_irq(evt->irq, &mct_tick0_event_irq);
437 else
438 remove_irq(evt->irq, &mct_tick1_event_irq);
434 else 439 else
435 disable_percpu_irq(IRQ_MCT_LOCALTIMER); 440 disable_percpu_irq(IRQ_MCT_LOCALTIMER);
436} 441}
@@ -443,6 +448,7 @@ static void __init exynos4_timer_resources(void)
443 448
444 clk_rate = clk_get_rate(mct_clk); 449 clk_rate = clk_get_rate(mct_clk);
445 450
451#ifdef CONFIG_LOCAL_TIMERS
446 if (mct_int_type == MCT_INT_PPI) { 452 if (mct_int_type == MCT_INT_PPI) {
447 int err; 453 int err;
448 454
@@ -452,6 +458,7 @@ static void __init exynos4_timer_resources(void)
452 WARN(err, "MCT: can't request IRQ %d (%d)\n", 458 WARN(err, "MCT: can't request IRQ %d (%d)\n",
453 IRQ_MCT_LOCALTIMER, err); 459 IRQ_MCT_LOCALTIMER, err);
454 } 460 }
461#endif /* CONFIG_LOCAL_TIMERS */
455} 462}
456 463
457static void __init exynos4_timer_init(void) 464static void __init exynos4_timer_init(void)
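
Two things happen in the mct.c hunks: the per-CPU tick data and its helpers move under CONFIG_LOCAL_TIMERS, and local_timer_stop() now removes the per-CPU irqaction instead of merely disabling the shared SPI. A toy sketch of that teardown decision; remove_irq() and the irqactions are stand-ins:

    #include <stdio.h>

    struct irqaction { const char *name; };

    static struct irqaction mct_tick0_event_irq = { "mct_tick0" };
    static struct irqaction mct_tick1_event_irq = { "mct_tick1" };

    static void remove_irq(int irq, struct irqaction *act)
    {
            printf("irq %d: removed action %s\n", irq, act->name);
    }

    static void local_timer_stop(int cpu, int irq)
    {
            remove_irq(irq, cpu == 0 ? &mct_tick0_event_irq
                                     : &mct_tick1_event_irq);
    }

    int main(void)
    {
            local_timer_stop(0, 120);
            local_timer_stop(1, 121);
            return 0;
    }
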
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index b82dcf08e747..88660d500f5b 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -22,6 +22,7 @@
22#include <linux/of_irq.h> 22#include <linux/of_irq.h>
23#include <linux/of_platform.h> 23#include <linux/of_platform.h>
24#include <linux/of_address.h> 24#include <linux/of_address.h>
25#include <linux/smp.h>
25 26
26#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
27#include <asm/unified.h> 28#include <asm/unified.h>
@@ -72,6 +73,9 @@ static void __init highbank_map_io(void)
72 73
73void highbank_set_cpu_jump(int cpu, void *jump_addr) 74void highbank_set_cpu_jump(int cpu, void *jump_addr)
74{ 75{
76#ifdef CONFIG_SMP
77 cpu = cpu_logical_map(cpu);
78#endif
75 writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu)); 79 writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu));
76 __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16); 80 __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
77 outer_clean_range(HB_JUMP_TABLE_PHYS(cpu), 81 outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
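
highbank_set_cpu_jump() now translates the logical CPU number through cpu_logical_map() before indexing the jump table, since the scheduler's CPU numbering need not match the hardware ID used by the boot path. A made-up mapping to show the effect; this is not Highbank's real topology:

    #include <stdio.h>

    #define NR_CPUS 4

    /* Invented logical -> hardware mapping for illustration only. */
    static const int logical_to_hw[NR_CPUS] = { 0, 2, 1, 3 };
    #define cpu_logical_map(cpu) logical_to_hw[cpu]

    static unsigned long jump_table[NR_CPUS];

    static void set_cpu_jump(int cpu, unsigned long addr)
    {
            cpu = cpu_logical_map(cpu);     /* translate before indexing */
            jump_table[cpu] = addr;
    }

    int main(void)
    {
            set_cpu_jump(1, 0x80008000ul);
            for (int i = 0; i < NR_CPUS; i++)
                    printf("slot %d: %#lx\n", i, jump_table[i]);
            return 0;
    }
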
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 5f7f9c2a34ae..c44aa974e79c 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -10,11 +10,6 @@ config HAVE_IMX_MMDC
10config HAVE_IMX_SRC 10config HAVE_IMX_SRC
11 bool 11 bool
12 12
13#
14# ARCH_MX31 and ARCH_MX35 are left for compatibility
15# Some usages assume that having one of them implies not having (e.g.) ARCH_MX2.
16# To easily distinguish good and reviewed from unreviewed usages new (and IMHO
17# more sensible) names are used: SOC_IMX31 and SOC_IMX35
18config ARCH_MX1 13config ARCH_MX1
19 bool 14 bool
20 15
@@ -27,12 +22,6 @@ config ARCH_MX25
27config MACH_MX27 22config MACH_MX27
28 bool 23 bool
29 24
30config ARCH_MX31
31 bool
32
33config ARCH_MX35
34 bool
35
36config SOC_IMX1 25config SOC_IMX1
37 bool 26 bool
38 select ARCH_MX1 27 select ARCH_MX1
@@ -72,7 +61,6 @@ config SOC_IMX31
72 select CPU_V6 61 select CPU_V6
73 select IMX_HAVE_PLATFORM_MXC_RNGA 62 select IMX_HAVE_PLATFORM_MXC_RNGA
74 select ARCH_MXC_AUDMUX_V2 63 select ARCH_MXC_AUDMUX_V2
75 select ARCH_MX31
76 select MXC_AVIC 64 select MXC_AVIC
77 select SMP_ON_UP if SMP 65 select SMP_ON_UP if SMP
78 66
@@ -82,7 +70,6 @@ config SOC_IMX35
82 select ARCH_MXC_IOMUX_V3 70 select ARCH_MXC_IOMUX_V3
83 select ARCH_MXC_AUDMUX_V2 71 select ARCH_MXC_AUDMUX_V2
84 select HAVE_EPIT 72 select HAVE_EPIT
85 select ARCH_MX35
86 select MXC_AVIC 73 select MXC_AVIC
87 select SMP_ON_UP if SMP 74 select SMP_ON_UP if SMP
88 75
diff --git a/arch/arm/mach-imx/Makefile.boot b/arch/arm/mach-imx/Makefile.boot
index 22d85889f622..cfede5768aa0 100644
--- a/arch/arm/mach-imx/Makefile.boot
+++ b/arch/arm/mach-imx/Makefile.boot
@@ -1,22 +1,26 @@
1zreladdr-$(CONFIG_ARCH_MX1) += 0x08008000 1zreladdr-$(CONFIG_SOC_IMX1) += 0x08008000
2params_phys-$(CONFIG_ARCH_MX1) := 0x08000100 2params_phys-$(CONFIG_SOC_IMX1) := 0x08000100
3initrd_phys-$(CONFIG_ARCH_MX1) := 0x08800000 3initrd_phys-$(CONFIG_SOC_IMX1) := 0x08800000
4 4
5zreladdr-$(CONFIG_MACH_MX21) += 0xC0008000 5zreladdr-$(CONFIG_SOC_IMX21) += 0xC0008000
6params_phys-$(CONFIG_MACH_MX21) := 0xC0000100 6params_phys-$(CONFIG_SOC_IMX21) := 0xC0000100
7initrd_phys-$(CONFIG_MACH_MX21) := 0xC0800000 7initrd_phys-$(CONFIG_SOC_IMX21) := 0xC0800000
8 8
9zreladdr-$(CONFIG_ARCH_MX25) += 0x80008000 9zreladdr-$(CONFIG_SOC_IMX25) += 0x80008000
10params_phys-$(CONFIG_ARCH_MX25) := 0x80000100 10params_phys-$(CONFIG_SOC_IMX25) := 0x80000100
11initrd_phys-$(CONFIG_ARCH_MX25) := 0x80800000 11initrd_phys-$(CONFIG_SOC_IMX25) := 0x80800000
12 12
13zreladdr-$(CONFIG_MACH_MX27) += 0xA0008000 13zreladdr-$(CONFIG_SOC_IMX27) += 0xA0008000
14params_phys-$(CONFIG_MACH_MX27) := 0xA0000100 14params_phys-$(CONFIG_SOC_IMX27) := 0xA0000100
15initrd_phys-$(CONFIG_MACH_MX27) := 0xA0800000 15initrd_phys-$(CONFIG_SOC_IMX27) := 0xA0800000
16 16
17zreladdr-$(CONFIG_ARCH_MX3) += 0x80008000 17zreladdr-$(CONFIG_SOC_IMX31) += 0x80008000
18params_phys-$(CONFIG_ARCH_MX3) := 0x80000100 18params_phys-$(CONFIG_SOC_IMX31) := 0x80000100
19initrd_phys-$(CONFIG_ARCH_MX3) := 0x80800000 19initrd_phys-$(CONFIG_SOC_IMX31) := 0x80800000
20
21zreladdr-$(CONFIG_SOC_IMX35) += 0x80008000
22params_phys-$(CONFIG_SOC_IMX35) := 0x80000100
23initrd_phys-$(CONFIG_SOC_IMX35) := 0x80800000
20 24
21zreladdr-$(CONFIG_SOC_IMX6Q) += 0x10008000 25zreladdr-$(CONFIG_SOC_IMX6Q) += 0x10008000
22params_phys-$(CONFIG_SOC_IMX6Q) := 0x10000100 26params_phys-$(CONFIG_SOC_IMX6Q) := 0x10000100
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
index e0b926dfeced..039a7abb165a 100644
--- a/arch/arm/mach-imx/clock-imx6q.c
+++ b/arch/arm/mach-imx/clock-imx6q.c
@@ -1139,7 +1139,7 @@ static int _clk_set_rate(struct clk *clk, unsigned long rate)
1139 return -EINVAL; 1139 return -EINVAL;
1140 1140
1141 max_div = ((d->bm_pred >> d->bp_pred) + 1) * 1141 max_div = ((d->bm_pred >> d->bp_pred) + 1) *
1142 ((d->bm_pred >> d->bp_pred) + 1); 1142 ((d->bm_podf >> d->bp_podf) + 1);
1143 1143
1144 div = parent_rate / rate; 1144 div = parent_rate / rate;
1145 if (div == 0) 1145 if (div == 0)
@@ -1953,14 +1953,17 @@ static struct map_desc imx6q_clock_desc[] = {
1953 imx_map_entry(MX6Q, ANATOP, MT_DEVICE), 1953 imx_map_entry(MX6Q, ANATOP, MT_DEVICE),
1954}; 1954};
1955 1955
1956void __init imx6q_clock_map_io(void)
1957{
1958 iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
1959}
1960
1956int __init mx6q_clocks_init(void) 1961int __init mx6q_clocks_init(void)
1957{ 1962{
1958 struct device_node *np; 1963 struct device_node *np;
1959 void __iomem *base; 1964 void __iomem *base;
1960 int i, irq; 1965 int i, irq;
1961 1966
1962 iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
1963
1964 /* retrieve the freqency of fixed clocks from device tree */ 1967 /* retrieve the freqency of fixed clocks from device tree */
1965 for_each_compatible_node(np, NULL, "fixed-clock") { 1968 for_each_compatible_node(np, NULL, "fixed-clock") {
1966 u32 rate; 1969 u32 rate;
@@ -2002,6 +2005,21 @@ int __init mx6q_clocks_init(void)
2002 clk_set_rate(&asrc_serial_clk, 1500000); 2005 clk_set_rate(&asrc_serial_clk, 1500000);
2003 clk_set_rate(&enfc_clk, 11000000); 2006 clk_set_rate(&enfc_clk, 11000000);
2004 2007
2008 /*
2009 * Before pinctrl API is available, we have to rely on the pad
2010 * configuration set up by bootloader. For usdhc example here,
2011 * u-boot sets up the pads for 49.5 MHz case, and we have to lower
2012 * the usdhc clock from 198 to 49.5 MHz to match the pad configuration.
2013 *
2014 * FIXME: This is should be removed after pinctrl API is available.
2015 * At that time, usdhc driver can call pinctrl API to change pad
2016 * configuration dynamically per different usdhc clock settings.
2017 */
2018 clk_set_rate(&usdhc1_clk, 49500000);
2019 clk_set_rate(&usdhc2_clk, 49500000);
2020 clk_set_rate(&usdhc3_clk, 49500000);
2021 clk_set_rate(&usdhc4_clk, 49500000);
2022
2005 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); 2023 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
2006 base = of_iomap(np, 0); 2024 base = of_iomap(np, 0);
2007 WARN_ON(!base); 2025 WARN_ON(!base);
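
The clock-imx6q.c changes split the static mapping out into imx6q_clock_map_io(), pin the usdhc clocks at 49.5 MHz to match the bootloader's pad setup, and fix a copy-paste bug in _clk_set_rate(): max_div multiplied the pred field by itself instead of pred times podf. A sketch of the corrected computation with an invented field layout:

    #include <stdio.h>

    struct divider {
            unsigned bm_pred, bp_pred;      /* mask and shift, pre-divider  */
            unsigned bm_podf, bp_podf;      /* mask and shift, post-divider */
    };

    static unsigned max_div(const struct divider *d)
    {
            return ((d->bm_pred >> d->bp_pred) + 1) *
                   ((d->bm_podf >> d->bp_podf) + 1);    /* was bm_pred twice */
    }

    int main(void)
    {
            struct divider d = {
                    .bm_pred = 0x7u << 16, .bp_pred = 16,  /* 3-bit: /1../8  */
                    .bm_podf = 0x3fu,      .bp_podf = 0,   /* 6-bit: /1../64 */
            };

            printf("max divider = %u\n", max_div(&d));     /* 8 * 64 = 512 */
            return 0;
    }
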
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 8bf5fa349484..8deb012189b5 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -34,16 +34,18 @@ static void __init imx6q_map_io(void)
34{ 34{
35 imx_lluart_map_io(); 35 imx_lluart_map_io();
36 imx_scu_map_io(); 36 imx_scu_map_io();
37 imx6q_clock_map_io();
37} 38}
38 39
39static void __init imx6q_gpio_add_irq_domain(struct device_node *np, 40static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
40 struct device_node *interrupt_parent) 41 struct device_node *interrupt_parent)
41{ 42{
42 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS - 43 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
43 32 * 7; /* imx6q gets 7 gpio ports */
44 44
45 gpio_irq_base -= 32;
45 irq_domain_add_simple(np, gpio_irq_base); 46 irq_domain_add_simple(np, gpio_irq_base);
46 gpio_irq_base += 32; 47
48 return 0;
47} 49}
48 50
49static const struct of_device_id imx6q_irq_match[] __initconst = { 51static const struct of_device_id imx6q_irq_match[] __initconst = {
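
The GPIO irq-domain helper also changes shape: it now returns int so the caller can check it, and it derives each port's IRQ base by stepping 32 slots down from the top of the GPIO IRQ range instead of hard-coding the number of ports. A minimal model of that allocation, constants invented:

    #include <stdio.h>

    #define MXC_GPIO_IRQ_START 160          /* invented values */
    #define ARCH_NR_GPIOS      256

    static int alloc_gpio_irq_base(void)
    {
            static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;

            gpio_irq_base -= 32;            /* reserve 32 IRQs for this port */
            return gpio_irq_base;
    }

    int main(void)
    {
            for (int port = 0; port < 3; port++)
                    printf("gpio port %d -> irq base %d\n",
                           port, alloc_gpio_irq_base());
            return 0;
    }
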
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 9f0e82ec3398..31807d2a8b7b 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -33,29 +33,32 @@
33static void imx3_idle(void) 33static void imx3_idle(void)
34{ 34{
35 unsigned long reg = 0; 35 unsigned long reg = 0;
36 __asm__ __volatile__( 36
37 /* disable I and D cache */ 37 if (!need_resched())
38 "mrc p15, 0, %0, c1, c0, 0\n" 38 __asm__ __volatile__(
39 "bic %0, %0, #0x00001000\n" 39 /* disable I and D cache */
40 "bic %0, %0, #0x00000004\n" 40 "mrc p15, 0, %0, c1, c0, 0\n"
41 "mcr p15, 0, %0, c1, c0, 0\n" 41 "bic %0, %0, #0x00001000\n"
42 /* invalidate I cache */ 42 "bic %0, %0, #0x00000004\n"
43 "mov %0, #0\n" 43 "mcr p15, 0, %0, c1, c0, 0\n"
44 "mcr p15, 0, %0, c7, c5, 0\n" 44 /* invalidate I cache */
45 /* clear and invalidate D cache */ 45 "mov %0, #0\n"
46 "mov %0, #0\n" 46 "mcr p15, 0, %0, c7, c5, 0\n"
47 "mcr p15, 0, %0, c7, c14, 0\n" 47 /* clear and invalidate D cache */
48 /* WFI */ 48 "mov %0, #0\n"
49 "mov %0, #0\n" 49 "mcr p15, 0, %0, c7, c14, 0\n"
50 "mcr p15, 0, %0, c7, c0, 4\n" 50 /* WFI */
51 "nop\n" "nop\n" "nop\n" "nop\n" 51 "mov %0, #0\n"
52 "nop\n" "nop\n" "nop\n" 52 "mcr p15, 0, %0, c7, c0, 4\n"
53 /* enable I and D cache */ 53 "nop\n" "nop\n" "nop\n" "nop\n"
54 "mrc p15, 0, %0, c1, c0, 0\n" 54 "nop\n" "nop\n" "nop\n"
55 "orr %0, %0, #0x00001000\n" 55 /* enable I and D cache */
56 "orr %0, %0, #0x00000004\n" 56 "mrc p15, 0, %0, c1, c0, 0\n"
57 "mcr p15, 0, %0, c1, c0, 0\n" 57 "orr %0, %0, #0x00001000\n"
58 : "=r" (reg)); 58 "orr %0, %0, #0x00000004\n"
59 "mcr p15, 0, %0, c1, c0, 0\n"
60 : "=r" (reg));
61 local_irq_enable();
59} 62}
60 63
61static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size, 64static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size,
@@ -108,6 +111,7 @@ void imx3_init_l2x0(void)
108 l2x0_init(l2x0_base, 0x00030024, 0x00000000); 111 l2x0_init(l2x0_base, 0x00030024, 0x00000000);
109} 112}
110 113
114#ifdef CONFIG_SOC_IMX31
111static struct map_desc mx31_io_desc[] __initdata = { 115static struct map_desc mx31_io_desc[] __initdata = {
112 imx_map_entry(MX31, X_MEMC, MT_DEVICE), 116 imx_map_entry(MX31, X_MEMC, MT_DEVICE),
113 imx_map_entry(MX31, AVIC, MT_DEVICE_NONSHARED), 117 imx_map_entry(MX31, AVIC, MT_DEVICE_NONSHARED),
@@ -126,33 +130,11 @@ void __init mx31_map_io(void)
126 iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc)); 130 iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc));
127} 131}
128 132
129static struct map_desc mx35_io_desc[] __initdata = {
130 imx_map_entry(MX35, X_MEMC, MT_DEVICE),
131 imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
132 imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
133 imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
134 imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
135};
136
137void __init mx35_map_io(void)
138{
139 iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
140}
141
142void __init imx31_init_early(void) 133void __init imx31_init_early(void)
143{ 134{
144 mxc_set_cpu_type(MXC_CPU_MX31); 135 mxc_set_cpu_type(MXC_CPU_MX31);
145 mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR)); 136 mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
146 imx_idle = imx3_idle; 137 pm_idle = imx3_idle;
147 imx_ioremap = imx3_ioremap;
148}
149
150void __init imx35_init_early(void)
151{
152 mxc_set_cpu_type(MXC_CPU_MX35);
153 mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
154 mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
155 imx_idle = imx3_idle;
156 imx_ioremap = imx3_ioremap; 138 imx_ioremap = imx3_ioremap;
157} 139}
158 140
@@ -161,11 +143,6 @@ void __init mx31_init_irq(void)
161 mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR)); 143 mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR));
162} 144}
163 145
164void __init mx35_init_irq(void)
165{
166 mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
167}
168
169static struct sdma_script_start_addrs imx31_to1_sdma_script __initdata = { 146static struct sdma_script_start_addrs imx31_to1_sdma_script __initdata = {
170 .per_2_per_addr = 1677, 147 .per_2_per_addr = 1677,
171}; 148};
@@ -199,6 +176,35 @@ void __init imx31_soc_init(void)
199 176
200 imx_add_imx_sdma("imx31-sdma", MX31_SDMA_BASE_ADDR, MX31_INT_SDMA, &imx31_sdma_pdata); 177 imx_add_imx_sdma("imx31-sdma", MX31_SDMA_BASE_ADDR, MX31_INT_SDMA, &imx31_sdma_pdata);
201} 178}
179#endif /* ifdef CONFIG_SOC_IMX31 */
180
181#ifdef CONFIG_SOC_IMX35
182static struct map_desc mx35_io_desc[] __initdata = {
183 imx_map_entry(MX35, X_MEMC, MT_DEVICE),
184 imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
185 imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
186 imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
187 imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
188};
189
190void __init mx35_map_io(void)
191{
192 iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
193}
194
195void __init imx35_init_early(void)
196{
197 mxc_set_cpu_type(MXC_CPU_MX35);
198 mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
199 mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
200 pm_idle = imx3_idle;
201 imx_ioremap = imx3_ioremap;
202}
203
204void __init mx35_init_irq(void)
205{
206 mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
207}
202 208
203static struct sdma_script_start_addrs imx35_to1_sdma_script __initdata = { 209static struct sdma_script_start_addrs imx35_to1_sdma_script __initdata = {
204 .ap_2_ap_addr = 642, 210 .ap_2_ap_addr = 642,
@@ -254,3 +260,4 @@ void __init imx35_soc_init(void)
254 260
255 imx_add_imx_sdma("imx35-sdma", MX35_SDMA_BASE_ADDR, MX35_INT_SDMA, &imx35_sdma_pdata); 261 imx_add_imx_sdma("imx35-sdma", MX35_SDMA_BASE_ADDR, MX35_INT_SDMA, &imx35_sdma_pdata);
256} 262}
263#endif /* ifdef CONFIG_SOC_IMX35 */
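
mm-imx3.c moves the MX31/MX35 code under their SOC_IMX31/SOC_IMX35 options and hooks imx3_idle() up as pm_idle, which brings the usual idle-hook obligations: skip the low-power entry if a reschedule is already pending, and re-enable interrupts before returning, which is why the new code calls local_irq_enable() unconditionally. A stand-in sketch of that contract:

    #include <stdbool.h>
    #include <stdio.h>

    static bool resched_pending;            /* stand-in for TIF_NEED_RESCHED */
    static bool irqs_enabled;

    static bool need_resched(void)     { return resched_pending; }
    static void local_irq_enable(void) { irqs_enabled = true; }
    static void enter_low_power(void)  { puts("wfi: caches off, waiting for irq"); }

    static void imx3_style_idle(void)
    {
            if (!need_resched())
                    enter_low_power();
            local_irq_enable();     /* pm_idle must return with irqs on */
    }

    int main(void)
    {
            imx3_style_idle();
            printf("irqs enabled after idle: %d\n", irqs_enabled);
            return 0;
    }
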
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index 36cacbd0dcc2..a8e33681b732 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -14,6 +14,7 @@
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_address.h> 16#include <linux/of_address.h>
17#include <linux/smp.h>
17#include <asm/unified.h> 18#include <asm/unified.h>
18 19
19#define SRC_SCR 0x000 20#define SRC_SCR 0x000
@@ -23,10 +24,15 @@
23 24
24static void __iomem *src_base; 25static void __iomem *src_base;
25 26
27#ifndef CONFIG_SMP
28#define cpu_logical_map(cpu) 0
29#endif
30
26void imx_enable_cpu(int cpu, bool enable) 31void imx_enable_cpu(int cpu, bool enable)
27{ 32{
28 u32 mask, val; 33 u32 mask, val;
29 34
35 cpu = cpu_logical_map(cpu);
30 mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1); 36 mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
31 val = readl_relaxed(src_base + SRC_SCR); 37 val = readl_relaxed(src_base + SRC_SCR);
32 val = enable ? val | mask : val & ~mask; 38 val = enable ? val | mask : val & ~mask;
@@ -35,6 +41,7 @@ void imx_enable_cpu(int cpu, bool enable)
35 41
36void imx_set_cpu_jump(int cpu, void *jump_addr) 42void imx_set_cpu_jump(int cpu, void *jump_addr)
37{ 43{
44 cpu = cpu_logical_map(cpu);
38 writel_relaxed(BSYM(virt_to_phys(jump_addr)), 45 writel_relaxed(BSYM(virt_to_phys(jump_addr)),
39 src_base + SRC_GPR1 + cpu * 8); 46 src_base + SRC_GPR1 + cpu * 8);
40} 47}
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index 69156568bc41..4665767a4f79 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -182,7 +182,7 @@ static void __init gplugd_init(void)
182 182
183 /* on-chip devices */ 183 /* on-chip devices */
184 pxa168_add_uart(3); 184 pxa168_add_uart(3);
185 pxa168_add_ssp(0); 185 pxa168_add_ssp(1);
186 pxa168_add_twsi(0, NULL, ARRAY_AND_SIZE(gplugd_i2c_board_info)); 186 pxa168_add_twsi(0, NULL, ARRAY_AND_SIZE(gplugd_i2c_board_info));
187 187
188 pxa168_add_eth(&gplugd_eth_platform_data); 188 pxa168_add_eth(&gplugd_eth_platform_data);
diff --git a/arch/arm/mach-mmp/include/mach/gpio-pxa.h b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
index d14eeaf16322..99b4ce1b6562 100644
--- a/arch/arm/mach-mmp/include/mach/gpio-pxa.h
+++ b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
@@ -7,7 +7,7 @@
7#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000) 7#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
8 8
9#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2)) 9#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
10#define GPIO_REG(x) (GPIO_REGS_VIRT + (x)) 10#define GPIO_REG(x) (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
11 11
12#define NR_BUILTIN_GPIO IRQ_GPIO_NUM 12#define NR_BUILTIN_GPIO IRQ_GPIO_NUM
13 13
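
For gpio-pxa.h, GPIO_REG(x) now expands to a dereferenced volatile lvalue rather than a bare address, presumably so callers can read and write registers through the macro directly. A user-space imitation with an array standing in for the mapped window:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_mmio[0x40];        /* stands in for the MMIO window */

    #define GPIO_REGS_VIRT ((uintptr_t)fake_mmio)
    #define GPIO_REG(x)    (*(volatile uint32_t *)(GPIO_REGS_VIRT + (x)))

    int main(void)
    {
            GPIO_REG(0x10) = 0xff;          /* write a register directly */
            printf("reg 0x10 = %#x\n", (unsigned)GPIO_REG(0x10));
            return 0;
    }
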
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 4285dfd80b6f..4ad3969b9881 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -15,6 +15,8 @@ obj-$(CONFIG_MSM_SMD) += smd.o smd_debug.o
15obj-$(CONFIG_MSM_SMD) += last_radio_log.o 15obj-$(CONFIG_MSM_SMD) += last_radio_log.o
16obj-$(CONFIG_MSM_SCM) += scm.o scm-boot.o 16obj-$(CONFIG_MSM_SCM) += scm.o scm-boot.o
17 17
18CFLAGS_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
19
18obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 20obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
19obj-$(CONFIG_SMP) += headsmp.o platsmp.o 21obj-$(CONFIG_SMP) += headsmp.o platsmp.o
20 22
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index 71de5062c71e..db81ed531031 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -42,8 +42,8 @@
42 42
43extern struct sys_timer msm_timer; 43extern struct sys_timer msm_timer;
44 44
45static void __init msm7x30_fixup(struct machine_desc *desc, struct tag *tag, 45static void __init msm7x30_fixup(struct tag *tag, char **cmdline,
46 char **cmdline, struct meminfo *mi) 46 struct meminfo *mi)
47{ 47{
48 for (; tag->hdr.size; tag = tag_next(tag)) 48 for (; tag->hdr.size; tag = tag_next(tag))
49 if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) { 49 if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) {
diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
index b04468e7d00e..6dc1cbd2a595 100644
--- a/arch/arm/mach-msm/board-msm8960.c
+++ b/arch/arm/mach-msm/board-msm8960.c
@@ -32,8 +32,8 @@
32 32
33#include "devices.h" 33#include "devices.h"
34 34
35static void __init msm8960_fixup(struct machine_desc *desc, struct tag *tag, 35static void __init msm8960_fixup(struct tag *tag, char **cmdline,
36 char **cmdline, struct meminfo *mi) 36 struct meminfo *mi)
37{ 37{
38 for (; tag->hdr.size; tag = tag_next(tag)) 38 for (; tag->hdr.size; tag = tag_next(tag))
39 if (tag->hdr.tag == ATAG_MEM && 39 if (tag->hdr.tag == ATAG_MEM &&
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index cf38e2284fa9..44bf71688373 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -28,8 +28,8 @@
28#include <mach/board.h> 28#include <mach/board.h>
29#include <mach/msm_iomap.h> 29#include <mach/msm_iomap.h>
30 30
31static void __init msm8x60_fixup(struct machine_desc *desc, struct tag *tag, 31static void __init msm8x60_fixup(struct tag *tag, char **cmdline,
32 char **cmdline, struct meminfo *mi) 32 struct meminfo *mi)
33{ 33{
34 for (; tag->hdr.size; tag = tag_next(tag)) 34 for (; tag->hdr.size; tag = tag_next(tag))
35 if (tag->hdr.tag == ATAG_MEM && 35 if (tag->hdr.tag == ATAG_MEM &&
diff --git a/arch/arm/mach-msm/devices-iommu.c b/arch/arm/mach-msm/devices-iommu.c
index 24030d0da6e3..0fb7a17df398 100644
--- a/arch/arm/mach-msm/devices-iommu.c
+++ b/arch/arm/mach-msm/devices-iommu.c
@@ -18,6 +18,7 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/bootmem.h> 20#include <linux/bootmem.h>
21#include <linux/module.h>
21#include <mach/irqs.h> 22#include <mach/irqs.h>
22#include <mach/iommu.h> 23#include <mach/iommu.h>
23 24
diff --git a/arch/arm/mach-msm/scm.c b/arch/arm/mach-msm/scm.c
index 232f97a04504..bafabb502580 100644
--- a/arch/arm/mach-msm/scm.c
+++ b/arch/arm/mach-msm/scm.c
@@ -180,6 +180,9 @@ static u32 smc(u32 cmd_addr)
180 __asmeq("%1", "r0") 180 __asmeq("%1", "r0")
181 __asmeq("%2", "r1") 181 __asmeq("%2", "r1")
182 __asmeq("%3", "r2") 182 __asmeq("%3", "r2")
183#ifdef REQUIRES_SEC
184 ".arch_extension sec\n"
185#endif
183 "smc #0 @ switch to secure world\n" 186 "smc #0 @ switch to secure world\n"
184 : "=r" (r0) 187 : "=r" (r0)
185 : "r" (r0), "r" (r1), "r" (r2) 188 : "r" (r0), "r" (r1), "r" (r2)
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c
index 5c837603ff0f..24994bb52147 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -362,7 +362,7 @@ static void __init mx51_babbage_init(void)
362{ 362{
363 iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP; 363 iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
364 iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21, 364 iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
365 PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP); 365 PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH);
366 366
367 imx51_soc_init(); 367 imx51_soc_init();
368 368
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c
index 6bea31ab8f85..64bbfcea6f35 100644
--- a/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/arch/arm/mach-mx5/board-mx53_evk.c
@@ -106,7 +106,7 @@ static inline void mx53_evk_fec_reset(void)
106 gpio_set_value(MX53_EVK_FEC_PHY_RST, 1); 106 gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
107} 107}
108 108
109static struct fec_platform_data mx53_evk_fec_pdata = { 109static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
110 .phy = PHY_INTERFACE_MODE_RMII, 110 .phy = PHY_INTERFACE_MODE_RMII,
111}; 111};
112 112
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c
index 7678f7734db6..237bdecd9331 100644
--- a/arch/arm/mach-mx5/board-mx53_loco.c
+++ b/arch/arm/mach-mx5/board-mx53_loco.c
@@ -242,7 +242,7 @@ static inline void mx53_loco_fec_reset(void)
242 gpio_set_value(LOCO_FEC_PHY_RST, 1); 242 gpio_set_value(LOCO_FEC_PHY_RST, 1);
243} 243}
244 244
245static struct fec_platform_data mx53_loco_fec_data = { 245static const struct fec_platform_data mx53_loco_fec_data __initconst = {
246 .phy = PHY_INTERFACE_MODE_RMII, 246 .phy = PHY_INTERFACE_MODE_RMII,
247}; 247};
248 248
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c
index 59c0845eb4a6..d42132a80e8f 100644
--- a/arch/arm/mach-mx5/board-mx53_smd.c
+++ b/arch/arm/mach-mx5/board-mx53_smd.c
@@ -104,7 +104,7 @@ static inline void mx53_smd_fec_reset(void)
104 gpio_set_value(SMD_FEC_PHY_RST, 1); 104 gpio_set_value(SMD_FEC_PHY_RST, 1);
105} 105}
106 106
107static struct fec_platform_data mx53_smd_fec_data = { 107static const struct fec_platform_data mx53_smd_fec_data __initconst = {
108 .phy = PHY_INTERFACE_MODE_RMII, 108 .phy = PHY_INTERFACE_MODE_RMII,
109}; 109};
110 110
diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c
index 2aacf41c48e7..4cb276977190 100644
--- a/arch/arm/mach-mx5/clock-mx51-mx53.c
+++ b/arch/arm/mach-mx5/clock-mx51-mx53.c
@@ -1281,9 +1281,9 @@ DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
1281 NULL, NULL, &ipg_clk, &gpt_ipg_clk); 1281 NULL, NULL, &ipg_clk, &gpt_ipg_clk);
1282 1282
1283DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET, 1283DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET,
1284 NULL, NULL, &ipg_clk, NULL); 1284 NULL, NULL, &ipg_perclk, NULL);
1285DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET, 1285DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET,
1286 NULL, NULL, &ipg_clk, NULL); 1286 NULL, NULL, &ipg_perclk, NULL);
1287 1287
1288/* I2C */ 1288/* I2C */
1289DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET, 1289DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET,
@@ -1634,6 +1634,7 @@ int __init mx53_clocks_init(unsigned long ckil, unsigned long osc,
1634 return 0; 1634 return 0;
1635} 1635}
1636 1636
1637#ifdef CONFIG_OF
1637static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc, 1638static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
1638 unsigned long *ckih1, unsigned long *ckih2) 1639 unsigned long *ckih1, unsigned long *ckih2)
1639{ 1640{
@@ -1671,3 +1672,4 @@ int __init mx53_clocks_init_dt(void)
1671 clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2); 1672 clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
1672 return mx53_clocks_init(ckil, osc, ckih1, ckih2); 1673 return mx53_clocks_init(ckil, osc, ckih1, ckih2);
1673} 1674}
1675#endif
diff --git a/arch/arm/mach-mx5/cpu.c b/arch/arm/mach-mx5/cpu.c
index 5c5328257dca..5e2e7a843860 100644
--- a/arch/arm/mach-mx5/cpu.c
+++ b/arch/arm/mach-mx5/cpu.c
@@ -16,7 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <mach/hardware.h> 18#include <mach/hardware.h>
19#include <asm/io.h> 19#include <linux/io.h>
20 20
21static int mx5_cpu_rev = -1; 21static int mx5_cpu_rev = -1;
22 22
@@ -67,7 +67,8 @@ static int __init mx51_neon_fixup(void)
67 if (!cpu_is_mx51()) 67 if (!cpu_is_mx51())
68 return 0; 68 return 0;
69 69
70 if (mx51_revision() < IMX_CHIP_REVISION_3_0 && (elf_hwcap & HWCAP_NEON)) { 70 if (mx51_revision() < IMX_CHIP_REVISION_3_0 &&
71 (elf_hwcap & HWCAP_NEON)) {
71 elf_hwcap &= ~HWCAP_NEON; 72 elf_hwcap &= ~HWCAP_NEON;
72 pr_info("Turning off NEON support, detected broken NEON implementation\n"); 73 pr_info("Turning off NEON support, detected broken NEON implementation\n");
73 } 74 }
diff --git a/arch/arm/mach-mx5/imx51-dt.c b/arch/arm/mach-mx5/imx51-dt.c
index ccc61585659b..596edd967dbf 100644
--- a/arch/arm/mach-mx5/imx51-dt.c
+++ b/arch/arm/mach-mx5/imx51-dt.c
@@ -44,20 +44,22 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = {
44 { /* sentinel */ } 44 { /* sentinel */ }
45}; 45};
46 46
47static void __init imx51_tzic_add_irq_domain(struct device_node *np, 47static int __init imx51_tzic_add_irq_domain(struct device_node *np,
48 struct device_node *interrupt_parent) 48 struct device_node *interrupt_parent)
49{ 49{
50 irq_domain_add_simple(np, 0); 50 irq_domain_add_simple(np, 0);
51 return 0;
51} 52}
52 53
53static void __init imx51_gpio_add_irq_domain(struct device_node *np, 54static int __init imx51_gpio_add_irq_domain(struct device_node *np,
54 struct device_node *interrupt_parent) 55 struct device_node *interrupt_parent)
55{ 56{
56 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS - 57 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
57 32 * 4; /* imx51 gets 4 gpio ports */
58 58
59 gpio_irq_base -= 32;
59 irq_domain_add_simple(np, gpio_irq_base); 60 irq_domain_add_simple(np, gpio_irq_base);
60 gpio_irq_base += 32; 61
62 return 0;
61} 63}
62 64
63static const struct of_device_id imx51_irq_match[] __initconst = { 65static const struct of_device_id imx51_irq_match[] __initconst = {
diff --git a/arch/arm/mach-mx5/imx53-dt.c b/arch/arm/mach-mx5/imx53-dt.c
index ccaa0b81b768..85bfd5ff21b0 100644
--- a/arch/arm/mach-mx5/imx53-dt.c
+++ b/arch/arm/mach-mx5/imx53-dt.c
@@ -48,20 +48,22 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = {
48 { /* sentinel */ } 48 { /* sentinel */ }
49}; 49};
50 50
51static void __init imx53_tzic_add_irq_domain(struct device_node *np, 51static int __init imx53_tzic_add_irq_domain(struct device_node *np,
52 struct device_node *interrupt_parent) 52 struct device_node *interrupt_parent)
53{ 53{
54 irq_domain_add_simple(np, 0); 54 irq_domain_add_simple(np, 0);
55 return 0;
55} 56}
56 57
57static void __init imx53_gpio_add_irq_domain(struct device_node *np, 58static int __init imx53_gpio_add_irq_domain(struct device_node *np,
58 struct device_node *interrupt_parent) 59 struct device_node *interrupt_parent)
59{ 60{
60 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS - 61 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
61 32 * 7; /* imx53 gets 7 gpio ports */
62 62
63 gpio_irq_base -= 32;
63 irq_domain_add_simple(np, gpio_irq_base); 64 irq_domain_add_simple(np, gpio_irq_base);
64 gpio_irq_base += 32; 65
66 return 0;
65} 67}
66 68
67static const struct of_device_id imx53_irq_match[] __initconst = { 69static const struct of_device_id imx53_irq_match[] __initconst = {
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c
index 26eacc9d0d90..df4a508f240a 100644
--- a/arch/arm/mach-mx5/mm.c
+++ b/arch/arm/mach-mx5/mm.c
@@ -23,7 +23,9 @@
23 23
24static void imx5_idle(void) 24static void imx5_idle(void)
25{ 25{
26 mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF); 26 if (!need_resched())
27 mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
28 local_irq_enable();
27} 29}
28 30
29/* 31/*
@@ -89,7 +91,7 @@ void __init imx51_init_early(void)
89 mxc_set_cpu_type(MXC_CPU_MX51); 91 mxc_set_cpu_type(MXC_CPU_MX51);
90 mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR)); 92 mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR));
91 mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR)); 93 mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR));
92 imx_idle = imx5_idle; 94 pm_idle = imx5_idle;
93} 95}
94 96
95void __init imx53_init_early(void) 97void __init imx53_init_early(void)
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
index 229ae3494216..da6e4aad177c 100644
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ b/arch/arm/mach-mxs/clock-mx28.c
@@ -404,7 +404,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \
404 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \ 404 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
405 reg &= ~BM_CLKCTRL_##dr##_DIV; \ 405 reg &= ~BM_CLKCTRL_##dr##_DIV; \
406 reg |= div << BP_CLKCTRL_##dr##_DIV; \ 406 reg |= div << BP_CLKCTRL_##dr##_DIV; \
407 if (reg | (1 << clk->enable_shift)) { \ 407 if (reg & (1 << clk->enable_shift)) { \
408 pr_err("%s: clock is gated\n", __func__); \ 408 pr_err("%s: clock is gated\n", __func__); \
409 return -EINVAL; \ 409 return -EINVAL; \
410 } \ 410 } \
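
The clock-mx28.c one-liner is a classic operator slip: reg | (1 << shift) is non-zero for almost any register value, so the "clock is gated" bail-out fired even with the gate bit clear; reg & (1 << shift) tests only the gate bit. Reduced to a self-contained example with an invented bit layout:

    #include <stdio.h>

    #define GATE_SHIFT 31                   /* invented bit position */

    static int set_rate_checked(unsigned reg, unsigned div)
    {
            reg = (reg & ~0xffu) | div;             /* update divider field */
            if (reg & (1u << GATE_SHIFT)) {         /* was: reg | (1 << shift) */
                    fprintf(stderr, "clock is gated\n");
                    return -1;
            }
            /* ...the real code would now write reg back to CLKCTRL... */
            return 0;
    }

    int main(void)
    {
            printf("ungated: %d\n", set_rate_checked(0x00000010u, 4));
            printf("gated:   %d\n", set_rate_checked(0x80000010u, 4));
            return 0;
    }
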
diff --git a/arch/arm/mach-mxs/include/mach/mx28.h b/arch/arm/mach-mxs/include/mach/mx28.h
index 75d86118b76a..30c7990f3c01 100644
--- a/arch/arm/mach-mxs/include/mach/mx28.h
+++ b/arch/arm/mach-mxs/include/mach/mx28.h
@@ -104,8 +104,8 @@
104#define MX28_INT_CAN1 9 104#define MX28_INT_CAN1 9
105#define MX28_INT_LRADC_TOUCH 10 105#define MX28_INT_LRADC_TOUCH 10
106#define MX28_INT_HSADC 13 106#define MX28_INT_HSADC 13
107#define MX28_INT_IRADC_THRESH0 14 107#define MX28_INT_LRADC_THRESH0 14
108#define MX28_INT_IRADC_THRESH1 15 108#define MX28_INT_LRADC_THRESH1 15
109#define MX28_INT_LRADC_CH0 16 109#define MX28_INT_LRADC_CH0 16
110#define MX28_INT_LRADC_CH1 17 110#define MX28_INT_LRADC_CH1 17
111#define MX28_INT_LRADC_CH2 18 111#define MX28_INT_LRADC_CH2 18
diff --git a/arch/arm/mach-mxs/include/mach/mxs.h b/arch/arm/mach-mxs/include/mach/mxs.h
index 0d2d2b470998..bde5f6634747 100644
--- a/arch/arm/mach-mxs/include/mach/mxs.h
+++ b/arch/arm/mach-mxs/include/mach/mxs.h
@@ -30,6 +30,7 @@
30 */ 30 */
31#define cpu_is_mx23() ( \ 31#define cpu_is_mx23() ( \
32 machine_is_mx23evk() || \ 32 machine_is_mx23evk() || \
33 machine_is_stmp378x() || \
33 0) 34 0)
34#define cpu_is_mx28() ( \ 35#define cpu_is_mx28() ( \
35 machine_is_mx28evk() || \ 36 machine_is_mx28evk() || \
diff --git a/arch/arm/mach-mxs/mach-m28evk.c b/arch/arm/mach-mxs/mach-m28evk.c
index 3b1681e4f49a..6b00577b7025 100644
--- a/arch/arm/mach-mxs/mach-m28evk.c
+++ b/arch/arm/mach-mxs/mach-m28evk.c
@@ -361,6 +361,6 @@ static struct sys_timer m28evk_timer = {
361MACHINE_START(M28EVK, "DENX M28 EVK") 361MACHINE_START(M28EVK, "DENX M28 EVK")
362 .map_io = mx28_map_io, 362 .map_io = mx28_map_io,
363 .init_irq = mx28_init_irq, 363 .init_irq = mx28_init_irq,
364 .init_machine = m28evk_init,
365 .timer = &m28evk_timer, 364 .timer = &m28evk_timer,
365 .init_machine = m28evk_init,
366MACHINE_END 366MACHINE_END
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index ac2316d53d3c..064ec5abaa55 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -471,7 +471,8 @@ static void __init mx28evk_init(void)
471 "mmc0-slot-power"); 471 "mmc0-slot-power");
472 if (ret) 472 if (ret)
473 pr_warn("failed to request gpio mmc0-slot-power: %d\n", ret); 473 pr_warn("failed to request gpio mmc0-slot-power: %d\n", ret);
474 mx28_add_mxs_mmc(0, &mx28evk_mmc_pdata[0]); 474 else
475 mx28_add_mxs_mmc(0, &mx28evk_mmc_pdata[0]);
475 476
476 ret = gpio_request_one(MX28EVK_MMC1_SLOT_POWER, GPIOF_OUT_INIT_LOW, 477 ret = gpio_request_one(MX28EVK_MMC1_SLOT_POWER, GPIOF_OUT_INIT_LOW,
477 "mmc1-slot-power"); 478 "mmc1-slot-power");
@@ -480,7 +481,6 @@ static void __init mx28evk_init(void)
480 else 481 else
481 mx28_add_mxs_mmc(1, &mx28evk_mmc_pdata[1]); 482 mx28_add_mxs_mmc(1, &mx28evk_mmc_pdata[1]);
482 483
483 mx28_add_mxs_mmc(1, &mx28evk_mmc_pdata[1]);
484 mx28_add_rtc_stmp3xxx(); 484 mx28_add_rtc_stmp3xxx();
485 485
486 gpio_led_register_device(0, &mx28evk_led_data); 486 gpio_led_register_device(0, &mx28evk_led_data);
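
The mx28evk hunks fix the MMC0 registration so the slot is only added when its slot-power GPIO was actually acquired, and drop a duplicated registration of slot 1. The guard pattern, shrunk to a runnable toy in which gpio_request_one() is a fake that only grants GPIO 42:

    #include <stdio.h>

    static int gpio_request_one(int gpio)   /* fake: only GPIO 42 is free */
    {
            return gpio == 42 ? 0 : -16;    /* -16 standing in for -EBUSY */
    }

    static void add_mmc(int slot)
    {
            printf("registered mmc%d\n", slot);
    }

    static void register_mmc_slot(int slot, int power_gpio)
    {
            int ret = gpio_request_one(power_gpio);

            if (ret)
                    fprintf(stderr, "failed to request gpio for mmc%d: %d\n",
                            slot, ret);
            else
                    add_mmc(slot);          /* only when the GPIO is ours */
    }

    int main(void)
    {
            register_mmc_slot(0, 42);       /* succeeds */
            register_mmc_slot(1, 7);        /* GPIO busy -> slot skipped */
            return 0;
    }
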
diff --git a/arch/arm/mach-mxs/mach-stmp378x_devb.c b/arch/arm/mach-mxs/mach-stmp378x_devb.c
index 177e53123a02..6834dea38c04 100644
--- a/arch/arm/mach-mxs/mach-stmp378x_devb.c
+++ b/arch/arm/mach-mxs/mach-stmp378x_devb.c
@@ -115,6 +115,6 @@ static struct sys_timer stmp378x_dvb_timer = {
115MACHINE_START(STMP378X, "STMP378X") 115MACHINE_START(STMP378X, "STMP378X")
116 .map_io = mx23_map_io, 116 .map_io = mx23_map_io,
117 .init_irq = mx23_init_irq, 117 .init_irq = mx23_init_irq,
118 .init_machine = stmp378x_dvb_init,
119 .timer = &stmp378x_dvb_timer, 118 .timer = &stmp378x_dvb_timer,
119 .init_machine = stmp378x_dvb_init,
120MACHINE_END 120MACHINE_END
diff --git a/arch/arm/mach-mxs/module-tx28.c b/arch/arm/mach-mxs/module-tx28.c
index 0fcff47009cf..9a7b08b2a925 100644
--- a/arch/arm/mach-mxs/module-tx28.c
+++ b/arch/arm/mach-mxs/module-tx28.c
@@ -66,11 +66,11 @@ static const iomux_cfg_t tx28_fec1_pads[] __initconst = {
66 MX28_PAD_ENET0_CRS__ENET1_RX_EN, 66 MX28_PAD_ENET0_CRS__ENET1_RX_EN,
67}; 67};
68 68
69static struct fec_platform_data tx28_fec0_data = { 69static const struct fec_platform_data tx28_fec0_data __initconst = {
70 .phy = PHY_INTERFACE_MODE_RMII, 70 .phy = PHY_INTERFACE_MODE_RMII,
71}; 71};
72 72
73static struct fec_platform_data tx28_fec1_data = { 73static const struct fec_platform_data tx28_fec1_data __initconst = {
74 .phy = PHY_INTERFACE_MODE_RMII, 74 .phy = PHY_INTERFACE_MODE_RMII,
75}; 75};
76 76
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index e0a028161dde..73f287d6429b 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -171,14 +171,6 @@ config MACH_OMAP_GENERIC
171comment "OMAP CPU Speed" 171comment "OMAP CPU Speed"
172 depends on ARCH_OMAP1 172 depends on ARCH_OMAP1
173 173
174config OMAP_CLOCKS_SET_BY_BOOTLOADER
175 bool "OMAP clocks set by bootloader"
176 depends on ARCH_OMAP1
177 help
178 Enable this option to prevent the kernel from overriding the clock
179 frequencies programmed by bootloader for MPU, DSP, MMUs, TC,
180 internal LCD controller and MPU peripherals.
181
182config OMAP_ARM_216MHZ 174config OMAP_ARM_216MHZ
183 bool "OMAP ARM 216 MHz CPU (1710 only)" 175 bool "OMAP ARM 216 MHz CPU (1710 only)"
184 depends on ARCH_OMAP1 && ARCH_OMAP16XX 176 depends on ARCH_OMAP1 && ARCH_OMAP16XX
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 51bae31cf361..b0f15d234a12 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -302,8 +302,6 @@ static void __init ams_delta_init(void)
302 omap_cfg_reg(J19_1610_CAM_D6); 302 omap_cfg_reg(J19_1610_CAM_D6);
303 omap_cfg_reg(J18_1610_CAM_D7); 303 omap_cfg_reg(J18_1610_CAM_D7);
304 304
305 iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
306
307 omap_board_config = ams_delta_config; 305 omap_board_config = ams_delta_config;
308 omap_board_config_size = ARRAY_SIZE(ams_delta_config); 306 omap_board_config_size = ARRAY_SIZE(ams_delta_config);
309 omap_serial_init(); 307 omap_serial_init();
@@ -373,10 +371,16 @@ static int __init ams_delta_modem_init(void)
373} 371}
374arch_initcall(ams_delta_modem_init); 372arch_initcall(ams_delta_modem_init);
375 373
374static void __init ams_delta_map_io(void)
375{
376 omap15xx_map_io();
377 iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
378}
379
376MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)") 380MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
377 /* Maintainer: Jonathan McDowell <noodles@earth.li> */ 381 /* Maintainer: Jonathan McDowell <noodles@earth.li> */
378 .atag_offset = 0x100, 382 .atag_offset = 0x100,
379 .map_io = omap15xx_map_io, 383 .map_io = ams_delta_map_io,
380 .init_early = omap1_init_early, 384 .init_early = omap1_init_early,
381 .reserve = omap_reserve, 385 .reserve = omap_reserve,
382 .init_irq = omap1_init_irq, 386 .init_irq = omap1_init_irq,
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
index eaf09efb91ca..16b1423b454a 100644
--- a/arch/arm/mach-omap1/clock.h
+++ b/arch/arm/mach-omap1/clock.h
@@ -17,7 +17,8 @@
17 17
18#include <plat/clock.h> 18#include <plat/clock.h>
19 19
20extern int __init omap1_clk_init(void); 20int omap1_clk_init(void);
21void omap1_clk_late_init(void);
21extern int omap1_clk_enable(struct clk *clk); 22extern int omap1_clk_enable(struct clk *clk);
22extern void omap1_clk_disable(struct clk *clk); 23extern void omap1_clk_disable(struct clk *clk);
23extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate); 24extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate);
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
index 92400b9eb69f..9ff90a744a21 100644
--- a/arch/arm/mach-omap1/clock_data.c
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -16,6 +16,8 @@
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/cpufreq.h>
20#include <linux/delay.h>
19#include <linux/io.h> 21#include <linux/io.h>
20 22
21#include <asm/mach-types.h> /* for machine_is_* */ 23#include <asm/mach-types.h> /* for machine_is_* */
@@ -767,6 +769,15 @@ static struct clk_functions omap1_clk_functions = {
767 .clk_disable_unused = omap1_clk_disable_unused, 769 .clk_disable_unused = omap1_clk_disable_unused,
768}; 770};
769 771
772static void __init omap1_show_rates(void)
773{
774 pr_notice("Clocking rate (xtal/DPLL1/MPU): "
775 "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
776 ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
777 ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
778 arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
779}
780
770int __init omap1_clk_init(void) 781int __init omap1_clk_init(void)
771{ 782{
772 struct omap_clk *c; 783 struct omap_clk *c;
@@ -835,9 +846,12 @@ int __init omap1_clk_init(void)
 835	/* We want to be in synchronous scalable mode */ 846	/* We want to be in synchronous scalable mode */
836 omap_writew(0x1000, ARM_SYSST); 847 omap_writew(0x1000, ARM_SYSST);
837 848
838#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER 849
839 /* Use values set by bootloader. Determine PLL rate and recalculate 850 /*
840 * dependent clocks as if kernel had changed PLL or divisors. 851 * Initially use the values set by bootloader. Determine PLL rate and
852 * recalculate dependent clocks as if kernel had changed PLL or
853 * divisors. See also omap1_clk_late_init() that can reprogram dpll1
854 * after the SRAM is initialized.
841 */ 855 */
842 { 856 {
843 unsigned pll_ctl_val = omap_readw(DPLL_CTL); 857 unsigned pll_ctl_val = omap_readw(DPLL_CTL);
@@ -862,25 +876,10 @@ int __init omap1_clk_init(void)
862 } 876 }
863 } 877 }
864 } 878 }
865#else
866 /* Find the highest supported frequency and enable it */
867 if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
868 printk(KERN_ERR "System frequencies not set. Check your config.\n");
869 /* Guess sane values (60MHz) */
870 omap_writew(0x2290, DPLL_CTL);
871 omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
872 ck_dpll1.rate = 60000000;
873 }
874#endif
875 propagate_rate(&ck_dpll1); 879 propagate_rate(&ck_dpll1);
876 /* Cache rates for clocks connected to ck_ref (not dpll1) */ 880 /* Cache rates for clocks connected to ck_ref (not dpll1) */
877 propagate_rate(&ck_ref); 881 propagate_rate(&ck_ref);
878 printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): " 882 omap1_show_rates();
879 "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
880 ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
881 ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
882 arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
883
884 if (machine_is_omap_perseus2() || machine_is_omap_fsample()) { 883 if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
885 /* Select slicer output as OMAP input clock */ 884 /* Select slicer output as OMAP input clock */
886 omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1, 885 omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1,
@@ -925,3 +924,27 @@ int __init omap1_clk_init(void)
925 924
926 return 0; 925 return 0;
927} 926}
927
928#define OMAP1_DPLL1_SANE_VALUE 60000000
929
930void __init omap1_clk_late_init(void)
931{
932 unsigned long rate = ck_dpll1.rate;
933
934 if (rate >= OMAP1_DPLL1_SANE_VALUE)
935 return;
936
937 /* System booting at unusable rate, force reprogramming of DPLL1 */
938 ck_dpll1_p->rate = 0;
939
940 /* Find the highest supported frequency and enable it */
941 if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
942 pr_err("System frequencies not set, using default. Check your config.\n");
943 omap_writew(0x2290, DPLL_CTL);
944 omap_writew(cpu_is_omap7xx() ? 0x2005 : 0x0005, ARM_CKCTL);
945 ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
946 }
947 propagate_rate(&ck_dpll1);
948 omap1_show_rates();
949 loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate);
950}
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index 48ef9888e820..475cb2f50d87 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -30,6 +30,8 @@
30#include <plat/omap7xx.h> 30#include <plat/omap7xx.h>
31#include <plat/mcbsp.h> 31#include <plat/mcbsp.h>
32 32
33#include "clock.h"
34
33/*-------------------------------------------------------------------------*/ 35/*-------------------------------------------------------------------------*/
34 36
35#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE) 37#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
@@ -293,6 +295,7 @@ static int __init omap1_init_devices(void)
293 return -ENODEV; 295 return -ENODEV;
294 296
295 omap_sram_init(); 297 omap_sram_init();
298 omap1_clk_late_init();
296 299
297 /* please keep these calls, and their implementations above, 300 /* please keep these calls, and their implementations above,
298 * in alphabetical order so they're easier to sort through. 301 * in alphabetical order so they're easier to sort through.
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 503414718905..e1293aa513d3 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -334,6 +334,7 @@ config MACH_OMAP4_PANDA
334config OMAP3_EMU 334config OMAP3_EMU
335 bool "OMAP3 debugging peripherals" 335 bool "OMAP3 debugging peripherals"
336 depends on ARCH_OMAP3 336 depends on ARCH_OMAP3
337 select ARM_AMBA
337 select OC_ETM 338 select OC_ETM
338 help 339 help
339 Say Y here to enable debugging hardware of omap3 340 Say Y here to enable debugging hardware of omap3
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 69ab1c069134..b009f17dee56 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -4,7 +4,7 @@
4 4
5# Common support 5# Common support
6obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \ 6obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \
7 common.o gpio.o dma.o wd_timer.o 7 common.o gpio.o dma.o wd_timer.o display.o
8 8
9omap-2-3-common = irq.o sdrc.o 9omap-2-3-common = irq.o sdrc.o
10hwmod-common = omap_hwmod.o \ 10hwmod-common = omap_hwmod.o \
@@ -264,7 +264,4 @@ smsc911x-$(CONFIG_SMSC911X) := gpmc-smsc911x.o
264obj-y += $(smsc911x-m) $(smsc911x-y) 264obj-y += $(smsc911x-m) $(smsc911x-y)
265obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o 265obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o
266 266
267disp-$(CONFIG_OMAP2_DSS) := display.o
268obj-y += $(disp-m) $(disp-y)
269
270obj-y += common-board-devices.o twl-common.o 267obj-y += common-board-devices.o twl-common.o
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index ba1aa07bdb29..c15c5c9c9085 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -193,7 +193,7 @@ static struct platform_device rx51_charger_device = {
193static void __init rx51_charger_init(void) 193static void __init rx51_charger_init(void)
194{ 194{
195 WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO, 195 WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
196 GPIOF_OUT_INIT_LOW, "isp1704_reset")); 196 GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
197 197
198 platform_device_register(&rx51_charger_device); 198 platform_device_register(&rx51_charger_device);
199} 199}
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 1fe35c24fba2..942bb4f19f9f 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/cpuidle.h> 26#include <linux/cpuidle.h>
27#include <linux/export.h>
27 28
28#include <plat/prcm.h> 29#include <plat/prcm.h>
29#include <plat/irqs.h> 30#include <plat/irqs.h>
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index adb2756e242f..dce9905d64bb 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -27,8 +27,35 @@
27#include <plat/omap_hwmod.h> 27#include <plat/omap_hwmod.h>
28#include <plat/omap_device.h> 28#include <plat/omap_device.h>
29#include <plat/omap-pm.h> 29#include <plat/omap-pm.h>
30#include <plat/common.h>
30 31
31#include "control.h" 32#include "control.h"
33#include "display.h"
34
35#define DISPC_CONTROL 0x0040
36#define DISPC_CONTROL2 0x0238
37#define DISPC_IRQSTATUS 0x0018
38
39#define DSS_SYSCONFIG 0x10
40#define DSS_SYSSTATUS 0x14
41#define DSS_CONTROL 0x40
42#define DSS_SDI_CONTROL 0x44
43#define DSS_PLL_CONTROL 0x48
44
45#define LCD_EN_MASK (0x1 << 0)
46#define DIGIT_EN_MASK (0x1 << 1)
47
48#define FRAMEDONE_IRQ_SHIFT 0
49#define EVSYNC_EVEN_IRQ_SHIFT 2
50#define EVSYNC_ODD_IRQ_SHIFT 3
51#define FRAMEDONE2_IRQ_SHIFT 22
52#define FRAMEDONETV_IRQ_SHIFT 24
53
54/*
55 * FRAMEDONE_IRQ_TIMEOUT: how long (in milliseconds) to wait during DISPC
56 * reset before deciding that something has gone wrong
57 */
58#define FRAMEDONE_IRQ_TIMEOUT 100
32 59
33static struct platform_device omap_display_device = { 60static struct platform_device omap_display_device = {
34 .name = "omapdss", 61 .name = "omapdss",
@@ -172,3 +199,135 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
172 199
173 return r; 200 return r;
174} 201}
202
203static void dispc_disable_outputs(void)
204{
205 u32 v, irq_mask = 0;
206 bool lcd_en, digit_en, lcd2_en = false;
207 int i;
208 struct omap_dss_dispc_dev_attr *da;
209 struct omap_hwmod *oh;
210
211 oh = omap_hwmod_lookup("dss_dispc");
212 if (!oh) {
213 WARN(1, "display: could not disable outputs during reset - could not find dss_dispc hwmod\n");
214 return;
215 }
216
217 if (!oh->dev_attr) {
218 pr_err("display: could not disable outputs during reset due to missing dev_attr\n");
219 return;
220 }
221
222 da = (struct omap_dss_dispc_dev_attr *)oh->dev_attr;
223
224 /* store value of LCDENABLE and DIGITENABLE bits */
225 v = omap_hwmod_read(oh, DISPC_CONTROL);
226 lcd_en = v & LCD_EN_MASK;
227 digit_en = v & DIGIT_EN_MASK;
228
229 /* store value of LCDENABLE for LCD2 */
230 if (da->manager_count > 2) {
231 v = omap_hwmod_read(oh, DISPC_CONTROL2);
232 lcd2_en = v & LCD_EN_MASK;
233 }
234
235 if (!(lcd_en | digit_en | lcd2_en))
236 return; /* no managers currently enabled */
237
238 /*
239 * If any manager was enabled, we need to disable it before
240 * DSS clocks are disabled or DISPC module is reset
241 */
242 if (lcd_en)
243 irq_mask |= 1 << FRAMEDONE_IRQ_SHIFT;
244
245 if (digit_en) {
246 if (da->has_framedonetv_irq) {
247 irq_mask |= 1 << FRAMEDONETV_IRQ_SHIFT;
248 } else {
249 irq_mask |= 1 << EVSYNC_EVEN_IRQ_SHIFT |
250 1 << EVSYNC_ODD_IRQ_SHIFT;
251 }
252 }
253
254 if (lcd2_en)
255 irq_mask |= 1 << FRAMEDONE2_IRQ_SHIFT;
256
257 /*
258 * clear any previous FRAMEDONE, FRAMEDONETV,
259 * EVSYNC_EVEN/ODD or FRAMEDONE2 interrupts
260 */
261 omap_hwmod_write(irq_mask, oh, DISPC_IRQSTATUS);
262
263 /* disable LCD and TV managers */
264 v = omap_hwmod_read(oh, DISPC_CONTROL);
265 v &= ~(LCD_EN_MASK | DIGIT_EN_MASK);
266 omap_hwmod_write(v, oh, DISPC_CONTROL);
267
268 /* disable LCD2 manager */
269 if (da->manager_count > 2) {
270 v = omap_hwmod_read(oh, DISPC_CONTROL2);
271 v &= ~LCD_EN_MASK;
272 omap_hwmod_write(v, oh, DISPC_CONTROL2);
273 }
274
275 i = 0;
276 while ((omap_hwmod_read(oh, DISPC_IRQSTATUS) & irq_mask) !=
277 irq_mask) {
278 i++;
279 if (i > FRAMEDONE_IRQ_TIMEOUT) {
280 pr_err("didn't get FRAMEDONE1/2 or TV interrupt\n");
281 break;
282 }
283 mdelay(1);
284 }
285}
286
287#define MAX_MODULE_SOFTRESET_WAIT 10000
288int omap_dss_reset(struct omap_hwmod *oh)
289{
290 struct omap_hwmod_opt_clk *oc;
291 int c = 0;
292 int i, r;
293
294 if (!(oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)) {
295 pr_err("dss_core: hwmod data doesn't contain reset data\n");
296 return -EINVAL;
297 }
298
299 for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
300 if (oc->_clk)
301 clk_enable(oc->_clk);
302
303 dispc_disable_outputs();
304
305 /* clear SDI registers */
306 if (cpu_is_omap3430()) {
307 omap_hwmod_write(0x0, oh, DSS_SDI_CONTROL);
308 omap_hwmod_write(0x0, oh, DSS_PLL_CONTROL);
309 }
310
311 /*
312 * clear DSS_CONTROL register to switch DSS clock sources to
313 * PRCM clock, if any
314 */
315 omap_hwmod_write(0x0, oh, DSS_CONTROL);
316
317 omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs)
318 & SYSS_RESETDONE_MASK),
319 MAX_MODULE_SOFTRESET_WAIT, c);
320
321 if (c == MAX_MODULE_SOFTRESET_WAIT)
322 pr_warning("dss_core: waiting for reset to finish failed\n");
323 else
324 pr_debug("dss_core: softreset done\n");
325
326 for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
327 if (oc->_clk)
328 clk_disable(oc->_clk);
329
330 r = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
331
332 return r;
333}
diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h
new file mode 100644
index 000000000000..b871b017b352
--- /dev/null
+++ b/arch/arm/mach-omap2/display.h
@@ -0,0 +1,29 @@
1/*
2 * display.h - OMAP2+ integration-specific DSS header
3 *
4 * Copyright (C) 2011 Texas Instruments, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __ARCH_ARM_MACH_OMAP2_DISPLAY_H
20#define __ARCH_ARM_MACH_OMAP2_DISPLAY_H
21
22#include <linux/kernel.h>
23
24struct omap_dss_dispc_dev_attr {
25 u8 manager_count;
26 bool has_framedonetv_irq;
27};
28
29#endif
diff --git a/arch/arm/mach-omap2/io.h b/arch/arm/mach-omap2/io.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/arch/arm/mach-omap2/io.h
+++ /dev/null
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 292eee3be15f..28fcb27005d2 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -145,6 +145,9 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
145 pdata->reg_size = 4; 145 pdata->reg_size = 4;
146 pdata->has_ccr = true; 146 pdata->has_ccr = true;
147 } 147 }
148 pdata->set_clk_src = omap2_mcbsp_set_clk_src;
149 if (id == 1)
150 pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
148 151
149 if (oh->class->rev == MCBSP_CONFIG_TYPE3) { 152 if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
150 if (id == 2) 153 if (id == 2)
@@ -174,9 +177,6 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
174 name, oh->name); 177 name, oh->name);
175 return PTR_ERR(pdev); 178 return PTR_ERR(pdev);
176 } 179 }
177 pdata->set_clk_src = omap2_mcbsp_set_clk_src;
178 if (id == 1)
179 pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
180 omap_mcbsp_count++; 180 omap_mcbsp_count++;
181 return 0; 181 return 0;
182} 182}
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 6b3088db83b7..207a2ff9a8c4 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -749,7 +749,7 @@ static int _count_mpu_irqs(struct omap_hwmod *oh)
749 ohii = &oh->mpu_irqs[i++]; 749 ohii = &oh->mpu_irqs[i++];
750 } while (ohii->irq != -1); 750 } while (ohii->irq != -1);
751 751
752 return i; 752 return i-1;
753} 753}
754 754
755/** 755/**
@@ -772,7 +772,7 @@ static int _count_sdma_reqs(struct omap_hwmod *oh)
772 ohdi = &oh->sdma_reqs[i++]; 772 ohdi = &oh->sdma_reqs[i++];
773 } while (ohdi->dma_req != -1); 773 } while (ohdi->dma_req != -1);
774 774
775 return i; 775 return i-1;
776} 776}
777 777
778/** 778/**
@@ -795,7 +795,7 @@ static int _count_ocp_if_addr_spaces(struct omap_hwmod_ocp_if *os)
795 mem = &os->addr[i++]; 795 mem = &os->addr[i++];
796 } while (mem->pa_start != mem->pa_end); 796 } while (mem->pa_start != mem->pa_end);
797 797
798 return i; 798 return i-1;
799} 799}
800 800
801/** 801/**
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index 6d7206213525..a5409ce3f323 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -875,6 +875,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_slaves[] = {
875}; 875};
876 876
877static struct omap_hwmod_opt_clk dss_opt_clks[] = { 877static struct omap_hwmod_opt_clk dss_opt_clks[] = {
878 /*
879 * The DSS HW needs all DSS clocks enabled during reset. The dss_core
880 * driver does not use these clocks.
881 */
878 { .role = "tv_clk", .clk = "dss_54m_fck" }, 882 { .role = "tv_clk", .clk = "dss_54m_fck" },
879 { .role = "sys_clk", .clk = "dss2_fck" }, 883 { .role = "sys_clk", .clk = "dss2_fck" },
880}; 884};
@@ -899,7 +903,7 @@ static struct omap_hwmod omap2420_dss_core_hwmod = {
899 .slaves_cnt = ARRAY_SIZE(omap2420_dss_slaves), 903 .slaves_cnt = ARRAY_SIZE(omap2420_dss_slaves),
900 .masters = omap2420_dss_masters, 904 .masters = omap2420_dss_masters,
901 .masters_cnt = ARRAY_SIZE(omap2420_dss_masters), 905 .masters_cnt = ARRAY_SIZE(omap2420_dss_masters),
902 .flags = HWMOD_NO_IDLEST, 906 .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
903}; 907};
904 908
905/* l4_core -> dss_dispc */ 909/* l4_core -> dss_dispc */
@@ -939,6 +943,7 @@ static struct omap_hwmod omap2420_dss_dispc_hwmod = {
939 .slaves = omap2420_dss_dispc_slaves, 943 .slaves = omap2420_dss_dispc_slaves,
940 .slaves_cnt = ARRAY_SIZE(omap2420_dss_dispc_slaves), 944 .slaves_cnt = ARRAY_SIZE(omap2420_dss_dispc_slaves),
941 .flags = HWMOD_NO_IDLEST, 945 .flags = HWMOD_NO_IDLEST,
946 .dev_attr = &omap2_3_dss_dispc_dev_attr
942}; 947};
943 948
944/* l4_core -> dss_rfbi */ 949/* l4_core -> dss_rfbi */
@@ -961,6 +966,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_rfbi_slaves[] = {
961 &omap2420_l4_core__dss_rfbi, 966 &omap2420_l4_core__dss_rfbi,
962}; 967};
963 968
969static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
970 { .role = "ick", .clk = "dss_ick" },
971};
972
964static struct omap_hwmod omap2420_dss_rfbi_hwmod = { 973static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
965 .name = "dss_rfbi", 974 .name = "dss_rfbi",
966 .class = &omap2_rfbi_hwmod_class, 975 .class = &omap2_rfbi_hwmod_class,
@@ -972,6 +981,8 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
972 .module_offs = CORE_MOD, 981 .module_offs = CORE_MOD,
973 }, 982 },
974 }, 983 },
984 .opt_clks = dss_rfbi_opt_clks,
985 .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
975 .slaves = omap2420_dss_rfbi_slaves, 986 .slaves = omap2420_dss_rfbi_slaves,
976 .slaves_cnt = ARRAY_SIZE(omap2420_dss_rfbi_slaves), 987 .slaves_cnt = ARRAY_SIZE(omap2420_dss_rfbi_slaves),
977 .flags = HWMOD_NO_IDLEST, 988 .flags = HWMOD_NO_IDLEST,
@@ -981,7 +992,7 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
981static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = { 992static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = {
982 .master = &omap2420_l4_core_hwmod, 993 .master = &omap2420_l4_core_hwmod,
983 .slave = &omap2420_dss_venc_hwmod, 994 .slave = &omap2420_dss_venc_hwmod,
984 .clk = "dss_54m_fck", 995 .clk = "dss_ick",
985 .addr = omap2_dss_venc_addrs, 996 .addr = omap2_dss_venc_addrs,
986 .fw = { 997 .fw = {
987 .omap2 = { 998 .omap2 = {
@@ -1001,7 +1012,7 @@ static struct omap_hwmod_ocp_if *omap2420_dss_venc_slaves[] = {
1001static struct omap_hwmod omap2420_dss_venc_hwmod = { 1012static struct omap_hwmod omap2420_dss_venc_hwmod = {
1002 .name = "dss_venc", 1013 .name = "dss_venc",
1003 .class = &omap2_venc_hwmod_class, 1014 .class = &omap2_venc_hwmod_class,
1004 .main_clk = "dss1_fck", 1015 .main_clk = "dss_54m_fck",
1005 .prcm = { 1016 .prcm = {
1006 .omap2 = { 1017 .omap2 = {
1007 .prcm_reg_id = 1, 1018 .prcm_reg_id = 1,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index a2580d01c3ff..c4f56cb60d7d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -942,6 +942,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_slaves[] = {
942}; 942};
943 943
944static struct omap_hwmod_opt_clk dss_opt_clks[] = { 944static struct omap_hwmod_opt_clk dss_opt_clks[] = {
945 /*
946 * The DSS HW needs all DSS clocks enabled during reset. The dss_core
947 * driver does not use these clocks.
948 */
945 { .role = "tv_clk", .clk = "dss_54m_fck" }, 949 { .role = "tv_clk", .clk = "dss_54m_fck" },
946 { .role = "sys_clk", .clk = "dss2_fck" }, 950 { .role = "sys_clk", .clk = "dss2_fck" },
947}; 951};
@@ -966,7 +970,7 @@ static struct omap_hwmod omap2430_dss_core_hwmod = {
966 .slaves_cnt = ARRAY_SIZE(omap2430_dss_slaves), 970 .slaves_cnt = ARRAY_SIZE(omap2430_dss_slaves),
967 .masters = omap2430_dss_masters, 971 .masters = omap2430_dss_masters,
968 .masters_cnt = ARRAY_SIZE(omap2430_dss_masters), 972 .masters_cnt = ARRAY_SIZE(omap2430_dss_masters),
969 .flags = HWMOD_NO_IDLEST, 973 .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
970}; 974};
971 975
972/* l4_core -> dss_dispc */ 976/* l4_core -> dss_dispc */
@@ -1000,6 +1004,7 @@ static struct omap_hwmod omap2430_dss_dispc_hwmod = {
1000 .slaves = omap2430_dss_dispc_slaves, 1004 .slaves = omap2430_dss_dispc_slaves,
1001 .slaves_cnt = ARRAY_SIZE(omap2430_dss_dispc_slaves), 1005 .slaves_cnt = ARRAY_SIZE(omap2430_dss_dispc_slaves),
1002 .flags = HWMOD_NO_IDLEST, 1006 .flags = HWMOD_NO_IDLEST,
1007 .dev_attr = &omap2_3_dss_dispc_dev_attr
1003}; 1008};
1004 1009
1005/* l4_core -> dss_rfbi */ 1010/* l4_core -> dss_rfbi */
@@ -1016,6 +1021,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_rfbi_slaves[] = {
1016 &omap2430_l4_core__dss_rfbi, 1021 &omap2430_l4_core__dss_rfbi,
1017}; 1022};
1018 1023
1024static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
1025 { .role = "ick", .clk = "dss_ick" },
1026};
1027
1019static struct omap_hwmod omap2430_dss_rfbi_hwmod = { 1028static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
1020 .name = "dss_rfbi", 1029 .name = "dss_rfbi",
1021 .class = &omap2_rfbi_hwmod_class, 1030 .class = &omap2_rfbi_hwmod_class,
@@ -1027,6 +1036,8 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
1027 .module_offs = CORE_MOD, 1036 .module_offs = CORE_MOD,
1028 }, 1037 },
1029 }, 1038 },
1039 .opt_clks = dss_rfbi_opt_clks,
1040 .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
1030 .slaves = omap2430_dss_rfbi_slaves, 1041 .slaves = omap2430_dss_rfbi_slaves,
1031 .slaves_cnt = ARRAY_SIZE(omap2430_dss_rfbi_slaves), 1042 .slaves_cnt = ARRAY_SIZE(omap2430_dss_rfbi_slaves),
1032 .flags = HWMOD_NO_IDLEST, 1043 .flags = HWMOD_NO_IDLEST,
@@ -1036,7 +1047,7 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
1036static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = { 1047static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = {
1037 .master = &omap2430_l4_core_hwmod, 1048 .master = &omap2430_l4_core_hwmod,
1038 .slave = &omap2430_dss_venc_hwmod, 1049 .slave = &omap2430_dss_venc_hwmod,
1039 .clk = "dss_54m_fck", 1050 .clk = "dss_ick",
1040 .addr = omap2_dss_venc_addrs, 1051 .addr = omap2_dss_venc_addrs,
1041 .flags = OCPIF_SWSUP_IDLE, 1052 .flags = OCPIF_SWSUP_IDLE,
1042 .user = OCP_USER_MPU | OCP_USER_SDMA, 1053 .user = OCP_USER_MPU | OCP_USER_SDMA,
@@ -1050,7 +1061,7 @@ static struct omap_hwmod_ocp_if *omap2430_dss_venc_slaves[] = {
1050static struct omap_hwmod omap2430_dss_venc_hwmod = { 1061static struct omap_hwmod omap2430_dss_venc_hwmod = {
1051 .name = "dss_venc", 1062 .name = "dss_venc",
1052 .class = &omap2_venc_hwmod_class, 1063 .class = &omap2_venc_hwmod_class,
1053 .main_clk = "dss1_fck", 1064 .main_clk = "dss_54m_fck",
1054 .prcm = { 1065 .prcm = {
1055 .omap2 = { 1066 .omap2 = {
1056 .prcm_reg_id = 1, 1067 .prcm_reg_id = 1,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
index c451729d289a..c11273da5dcc 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
@@ -11,6 +11,7 @@
11#include <plat/omap_hwmod.h> 11#include <plat/omap_hwmod.h>
12#include <plat/serial.h> 12#include <plat/serial.h>
13#include <plat/dma.h> 13#include <plat/dma.h>
14#include <plat/common.h>
14 15
15#include <mach/irqs.h> 16#include <mach/irqs.h>
16 17
@@ -43,13 +44,15 @@ static struct omap_hwmod_class_sysconfig omap2_dss_sysc = {
43 .rev_offs = 0x0000, 44 .rev_offs = 0x0000,
44 .sysc_offs = 0x0010, 45 .sysc_offs = 0x0010,
45 .syss_offs = 0x0014, 46 .syss_offs = 0x0014,
46 .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), 47 .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
48 SYSS_HAS_RESET_STATUS),
47 .sysc_fields = &omap_hwmod_sysc_type1, 49 .sysc_fields = &omap_hwmod_sysc_type1,
48}; 50};
49 51
50struct omap_hwmod_class omap2_dss_hwmod_class = { 52struct omap_hwmod_class omap2_dss_hwmod_class = {
51 .name = "dss", 53 .name = "dss",
52 .sysc = &omap2_dss_sysc, 54 .sysc = &omap2_dss_sysc,
55 .reset = omap_dss_reset,
53}; 56};
54 57
55/* 58/*
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index bc9035ec87fc..7f8915ad5099 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1369,9 +1369,14 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_slaves[] = {
1369}; 1369};
1370 1370
1371static struct omap_hwmod_opt_clk dss_opt_clks[] = { 1371static struct omap_hwmod_opt_clk dss_opt_clks[] = {
1372 { .role = "tv_clk", .clk = "dss_tv_fck" }, 1372 /*
1373 { .role = "video_clk", .clk = "dss_96m_fck" }, 1373 * The DSS HW needs all DSS clocks enabled during reset. The dss_core
1374 * driver does not use these clocks.
1375 */
1374 { .role = "sys_clk", .clk = "dss2_alwon_fck" }, 1376 { .role = "sys_clk", .clk = "dss2_alwon_fck" },
1377 { .role = "tv_clk", .clk = "dss_tv_fck" },
1378 /* required only on OMAP3430 */
1379 { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
1375}; 1380};
1376 1381
1377static struct omap_hwmod omap3430es1_dss_core_hwmod = { 1382static struct omap_hwmod omap3430es1_dss_core_hwmod = {
@@ -1394,11 +1399,12 @@ static struct omap_hwmod omap3430es1_dss_core_hwmod = {
1394 .slaves_cnt = ARRAY_SIZE(omap3430es1_dss_slaves), 1399 .slaves_cnt = ARRAY_SIZE(omap3430es1_dss_slaves),
1395 .masters = omap3xxx_dss_masters, 1400 .masters = omap3xxx_dss_masters,
1396 .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters), 1401 .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters),
1397 .flags = HWMOD_NO_IDLEST, 1402 .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
1398}; 1403};
1399 1404
1400static struct omap_hwmod omap3xxx_dss_core_hwmod = { 1405static struct omap_hwmod omap3xxx_dss_core_hwmod = {
1401 .name = "dss_core", 1406 .name = "dss_core",
1407 .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
1402 .class = &omap2_dss_hwmod_class, 1408 .class = &omap2_dss_hwmod_class,
1403 .main_clk = "dss1_alwon_fck", /* instead of dss_fck */ 1409 .main_clk = "dss1_alwon_fck", /* instead of dss_fck */
1404 .sdma_reqs = omap3xxx_dss_sdma_chs, 1410 .sdma_reqs = omap3xxx_dss_sdma_chs,
@@ -1456,6 +1462,7 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
1456 .slaves = omap3xxx_dss_dispc_slaves, 1462 .slaves = omap3xxx_dss_dispc_slaves,
1457 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_slaves), 1463 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_slaves),
1458 .flags = HWMOD_NO_IDLEST, 1464 .flags = HWMOD_NO_IDLEST,
1465 .dev_attr = &omap2_3_dss_dispc_dev_attr
1459}; 1466};
1460 1467
1461/* 1468/*
@@ -1486,6 +1493,7 @@ static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = {
1486static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = { 1493static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
1487 .master = &omap3xxx_l4_core_hwmod, 1494 .master = &omap3xxx_l4_core_hwmod,
1488 .slave = &omap3xxx_dss_dsi1_hwmod, 1495 .slave = &omap3xxx_dss_dsi1_hwmod,
1496 .clk = "dss_ick",
1489 .addr = omap3xxx_dss_dsi1_addrs, 1497 .addr = omap3xxx_dss_dsi1_addrs,
1490 .fw = { 1498 .fw = {
1491 .omap2 = { 1499 .omap2 = {
@@ -1502,6 +1510,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dsi1_slaves[] = {
1502 &omap3xxx_l4_core__dss_dsi1, 1510 &omap3xxx_l4_core__dss_dsi1,
1503}; 1511};
1504 1512
1513static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = {
1514 { .role = "sys_clk", .clk = "dss2_alwon_fck" },
1515};
1516
1505static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = { 1517static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
1506 .name = "dss_dsi1", 1518 .name = "dss_dsi1",
1507 .class = &omap3xxx_dsi_hwmod_class, 1519 .class = &omap3xxx_dsi_hwmod_class,
@@ -1514,6 +1526,8 @@ static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
1514 .module_offs = OMAP3430_DSS_MOD, 1526 .module_offs = OMAP3430_DSS_MOD,
1515 }, 1527 },
1516 }, 1528 },
1529 .opt_clks = dss_dsi1_opt_clks,
1530 .opt_clks_cnt = ARRAY_SIZE(dss_dsi1_opt_clks),
1517 .slaves = omap3xxx_dss_dsi1_slaves, 1531 .slaves = omap3xxx_dss_dsi1_slaves,
1518 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves), 1532 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves),
1519 .flags = HWMOD_NO_IDLEST, 1533 .flags = HWMOD_NO_IDLEST,
@@ -1540,6 +1554,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = {
1540 &omap3xxx_l4_core__dss_rfbi, 1554 &omap3xxx_l4_core__dss_rfbi,
1541}; 1555};
1542 1556
1557static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
1558 { .role = "ick", .clk = "dss_ick" },
1559};
1560
1543static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = { 1561static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
1544 .name = "dss_rfbi", 1562 .name = "dss_rfbi",
1545 .class = &omap2_rfbi_hwmod_class, 1563 .class = &omap2_rfbi_hwmod_class,
@@ -1551,6 +1569,8 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
1551 .module_offs = OMAP3430_DSS_MOD, 1569 .module_offs = OMAP3430_DSS_MOD,
1552 }, 1570 },
1553 }, 1571 },
1572 .opt_clks = dss_rfbi_opt_clks,
1573 .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
1554 .slaves = omap3xxx_dss_rfbi_slaves, 1574 .slaves = omap3xxx_dss_rfbi_slaves,
1555 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves), 1575 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves),
1556 .flags = HWMOD_NO_IDLEST, 1576 .flags = HWMOD_NO_IDLEST,
@@ -1560,7 +1580,7 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
1560static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = { 1580static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
1561 .master = &omap3xxx_l4_core_hwmod, 1581 .master = &omap3xxx_l4_core_hwmod,
1562 .slave = &omap3xxx_dss_venc_hwmod, 1582 .slave = &omap3xxx_dss_venc_hwmod,
1563 .clk = "dss_tv_fck", 1583 .clk = "dss_ick",
1564 .addr = omap2_dss_venc_addrs, 1584 .addr = omap2_dss_venc_addrs,
1565 .fw = { 1585 .fw = {
1566 .omap2 = { 1586 .omap2 = {
@@ -1578,10 +1598,15 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = {
1578 &omap3xxx_l4_core__dss_venc, 1598 &omap3xxx_l4_core__dss_venc,
1579}; 1599};
1580 1600
1601static struct omap_hwmod_opt_clk dss_venc_opt_clks[] = {
1602 /* required only on OMAP3430 */
1603 { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
1604};
1605
1581static struct omap_hwmod omap3xxx_dss_venc_hwmod = { 1606static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
1582 .name = "dss_venc", 1607 .name = "dss_venc",
1583 .class = &omap2_venc_hwmod_class, 1608 .class = &omap2_venc_hwmod_class,
1584 .main_clk = "dss1_alwon_fck", 1609 .main_clk = "dss_tv_fck",
1585 .prcm = { 1610 .prcm = {
1586 .omap2 = { 1611 .omap2 = {
1587 .prcm_reg_id = 1, 1612 .prcm_reg_id = 1,
@@ -1589,6 +1614,8 @@ static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
1589 .module_offs = OMAP3430_DSS_MOD, 1614 .module_offs = OMAP3430_DSS_MOD,
1590 }, 1615 },
1591 }, 1616 },
1617 .opt_clks = dss_venc_opt_clks,
1618 .opt_clks_cnt = ARRAY_SIZE(dss_venc_opt_clks),
1592 .slaves = omap3xxx_dss_venc_slaves, 1619 .slaves = omap3xxx_dss_venc_slaves,
1593 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_venc_slaves), 1620 .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_venc_slaves),
1594 .flags = HWMOD_NO_IDLEST, 1621 .flags = HWMOD_NO_IDLEST,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 7695e5d43316..daaf165af696 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -30,6 +30,7 @@
30#include <plat/mmc.h> 30#include <plat/mmc.h>
31#include <plat/i2c.h> 31#include <plat/i2c.h>
32#include <plat/dmtimer.h> 32#include <plat/dmtimer.h>
33#include <plat/common.h>
33 34
34#include "omap_hwmod_common_data.h" 35#include "omap_hwmod_common_data.h"
35 36
@@ -1187,6 +1188,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_dss_sysc = {
1187static struct omap_hwmod_class omap44xx_dss_hwmod_class = { 1188static struct omap_hwmod_class omap44xx_dss_hwmod_class = {
1188 .name = "dss", 1189 .name = "dss",
1189 .sysc = &omap44xx_dss_sysc, 1190 .sysc = &omap44xx_dss_sysc,
1191 .reset = omap_dss_reset,
1190}; 1192};
1191 1193
1192/* dss */ 1194/* dss */
@@ -1240,12 +1242,12 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_slaves[] = {
1240static struct omap_hwmod_opt_clk dss_opt_clks[] = { 1242static struct omap_hwmod_opt_clk dss_opt_clks[] = {
1241 { .role = "sys_clk", .clk = "dss_sys_clk" }, 1243 { .role = "sys_clk", .clk = "dss_sys_clk" },
1242 { .role = "tv_clk", .clk = "dss_tv_clk" }, 1244 { .role = "tv_clk", .clk = "dss_tv_clk" },
1243 { .role = "dss_clk", .clk = "dss_dss_clk" }, 1245 { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
1244 { .role = "video_clk", .clk = "dss_48mhz_clk" },
1245}; 1246};
1246 1247
1247static struct omap_hwmod omap44xx_dss_hwmod = { 1248static struct omap_hwmod omap44xx_dss_hwmod = {
1248 .name = "dss_core", 1249 .name = "dss_core",
1250 .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
1249 .class = &omap44xx_dss_hwmod_class, 1251 .class = &omap44xx_dss_hwmod_class,
1250 .clkdm_name = "l3_dss_clkdm", 1252 .clkdm_name = "l3_dss_clkdm",
1251 .main_clk = "dss_dss_clk", 1253 .main_clk = "dss_dss_clk",
@@ -1325,6 +1327,11 @@ static struct omap_hwmod_addr_space omap44xx_dss_dispc_addrs[] = {
1325 { } 1327 { }
1326}; 1328};
1327 1329
1330static struct omap_dss_dispc_dev_attr omap44xx_dss_dispc_dev_attr = {
1331 .manager_count = 3,
1332 .has_framedonetv_irq = 1
1333};
1334
1328/* l4_per -> dss_dispc */ 1335/* l4_per -> dss_dispc */
1329static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = { 1336static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = {
1330 .master = &omap44xx_l4_per_hwmod, 1337 .master = &omap44xx_l4_per_hwmod,
@@ -1340,12 +1347,6 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_dispc_slaves[] = {
1340 &omap44xx_l4_per__dss_dispc, 1347 &omap44xx_l4_per__dss_dispc,
1341}; 1348};
1342 1349
1343static struct omap_hwmod_opt_clk dss_dispc_opt_clks[] = {
1344 { .role = "sys_clk", .clk = "dss_sys_clk" },
1345 { .role = "tv_clk", .clk = "dss_tv_clk" },
1346 { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
1347};
1348
1349static struct omap_hwmod omap44xx_dss_dispc_hwmod = { 1350static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
1350 .name = "dss_dispc", 1351 .name = "dss_dispc",
1351 .class = &omap44xx_dispc_hwmod_class, 1352 .class = &omap44xx_dispc_hwmod_class,
@@ -1359,10 +1360,9 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
1359 .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, 1360 .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
1360 }, 1361 },
1361 }, 1362 },
1362 .opt_clks = dss_dispc_opt_clks,
1363 .opt_clks_cnt = ARRAY_SIZE(dss_dispc_opt_clks),
1364 .slaves = omap44xx_dss_dispc_slaves, 1363 .slaves = omap44xx_dss_dispc_slaves,
1365 .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dispc_slaves), 1364 .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dispc_slaves),
1365 .dev_attr = &omap44xx_dss_dispc_dev_attr
1366}; 1366};
1367 1367
1368/* 1368/*
@@ -1624,7 +1624,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
1624 .clkdm_name = "l3_dss_clkdm", 1624 .clkdm_name = "l3_dss_clkdm",
1625 .mpu_irqs = omap44xx_dss_hdmi_irqs, 1625 .mpu_irqs = omap44xx_dss_hdmi_irqs,
1626 .sdma_reqs = omap44xx_dss_hdmi_sdma_reqs, 1626 .sdma_reqs = omap44xx_dss_hdmi_sdma_reqs,
1627 .main_clk = "dss_dss_clk", 1627 .main_clk = "dss_48mhz_clk",
1628 .prcm = { 1628 .prcm = {
1629 .omap4 = { 1629 .omap4 = {
1630 .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, 1630 .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
@@ -1785,7 +1785,7 @@ static struct omap_hwmod omap44xx_dss_venc_hwmod = {
1785 .name = "dss_venc", 1785 .name = "dss_venc",
1786 .class = &omap44xx_venc_hwmod_class, 1786 .class = &omap44xx_venc_hwmod_class,
1787 .clkdm_name = "l3_dss_clkdm", 1787 .clkdm_name = "l3_dss_clkdm",
1788 .main_clk = "dss_dss_clk", 1788 .main_clk = "dss_tv_clk",
1789 .prcm = { 1789 .prcm = {
1790 .omap4 = { 1790 .omap4 = {
1791 .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, 1791 .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.c b/arch/arm/mach-omap2/omap_hwmod_common_data.c
index de832ebc93a9..51e5418899fb 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.c
@@ -49,3 +49,7 @@ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = {
49 .srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT, 49 .srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT,
50}; 50};
51 51
52struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = {
53 .manager_count = 2,
54 .has_framedonetv_irq = 0
55};
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.h b/arch/arm/mach-omap2/omap_hwmod_common_data.h
index 39a7c37f4587..ad5d8f04c0b8 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.h
@@ -16,6 +16,8 @@
16 16
17#include <plat/omap_hwmod.h> 17#include <plat/omap_hwmod.h>
18 18
19#include "display.h"
20
19/* Common address space across OMAP2xxx */ 21/* Common address space across OMAP2xxx */
20extern struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[]; 22extern struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[];
21extern struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[]; 23extern struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[];
@@ -111,4 +113,6 @@ extern struct omap_hwmod_class omap2xxx_dma_hwmod_class;
111extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class; 113extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class;
112extern struct omap_hwmod_class omap2xxx_mcspi_class; 114extern struct omap_hwmod_class omap2xxx_mcspi_class;
113 115
116extern struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr;
117
114#endif 118#endif
diff --git a/arch/arm/mach-omap2/omap_l3_noc.c b/arch/arm/mach-omap2/omap_l3_noc.c
index 6a66aa5e2a5b..d15225ff5c49 100644
--- a/arch/arm/mach-omap2/omap_l3_noc.c
+++ b/arch/arm/mach-omap2/omap_l3_noc.c
@@ -237,7 +237,7 @@ static int __devexit omap4_l3_remove(struct platform_device *pdev)
237static const struct of_device_id l3_noc_match[] = { 237static const struct of_device_id l3_noc_match[] = {
238 {.compatible = "ti,omap4-l3-noc", }, 238 {.compatible = "ti,omap4-l3-noc", },
239 {}, 239 {},
240} 240};
241MODULE_DEVICE_TABLE(of, l3_noc_match); 241MODULE_DEVICE_TABLE(of, l3_noc_match);
242#else 242#else
243#define l3_noc_match NULL 243#define l3_noc_match NULL
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 1e79bdf313e3..00bff46ca48b 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -24,6 +24,7 @@
24#include "powerdomain.h" 24#include "powerdomain.h"
25#include "clockdomain.h" 25#include "clockdomain.h"
26#include "pm.h" 26#include "pm.h"
27#include "twl-common.h"
27 28
28static struct omap_device_pm_latency *pm_lats; 29static struct omap_device_pm_latency *pm_lats;
29 30
@@ -226,11 +227,8 @@ postcore_initcall(omap2_common_pm_init);
226 227
227static int __init omap2_common_pm_late_init(void) 228static int __init omap2_common_pm_late_init(void)
228{ 229{
229 /* Init the OMAP TWL parameters */
230 omap3_twl_init();
231 omap4_twl_init();
232
233 /* Init the voltage layer */ 230 /* Init the voltage layer */
231 omap_pmic_late_init();
234 omap_voltage_late_init(); 232 omap_voltage_late_init();
235 233
236 /* Initialize the voltages */ 234 /* Initialize the voltages */
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 6a4f6839a7d9..cf246b39bac7 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -139,7 +139,7 @@ static irqreturn_t sr_interrupt(int irq, void *data)
139 sr_write_reg(sr_info, ERRCONFIG_V1, status); 139 sr_write_reg(sr_info, ERRCONFIG_V1, status);
140 } else if (sr_info->ip_type == SR_TYPE_V2) { 140 } else if (sr_info->ip_type == SR_TYPE_V2) {
141 /* Read the status bits */ 141 /* Read the status bits */
142 sr_read_reg(sr_info, IRQSTATUS); 142 status = sr_read_reg(sr_info, IRQSTATUS);
143 143
144 /* Clear them by writing back */ 144 /* Clear them by writing back */
145 sr_write_reg(sr_info, IRQSTATUS, status); 145 sr_write_reg(sr_info, IRQSTATUS, status);
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 522435772168..10b20c652e5d 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -30,6 +30,7 @@
30#include <plat/usb.h> 30#include <plat/usb.h>
31 31
32#include "twl-common.h" 32#include "twl-common.h"
33#include "pm.h"
33 34
34static struct i2c_board_info __initdata pmic_i2c_board_info = { 35static struct i2c_board_info __initdata pmic_i2c_board_info = {
35 .addr = 0x48, 36 .addr = 0x48,
@@ -48,6 +49,16 @@ void __init omap_pmic_init(int bus, u32 clkrate,
48 omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1); 49 omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
49} 50}
50 51
52void __init omap_pmic_late_init(void)
53{
 54	/* Init the OMAP TWL parameters (if PMIC has been registered) */
55 if (!pmic_i2c_board_info.irq)
56 return;
57
58 omap3_twl_init();
59 omap4_twl_init();
60}
61
51#if defined(CONFIG_ARCH_OMAP3) 62#if defined(CONFIG_ARCH_OMAP3)
52static struct twl4030_usb_data omap3_usb_pdata = { 63static struct twl4030_usb_data omap3_usb_pdata = {
53 .usb_mode = T2_USB_MODE_ULPI, 64 .usb_mode = T2_USB_MODE_ULPI,
diff --git a/arch/arm/mach-omap2/twl-common.h b/arch/arm/mach-omap2/twl-common.h
index 5e83a5bd37fb..275dde8cb27a 100644
--- a/arch/arm/mach-omap2/twl-common.h
+++ b/arch/arm/mach-omap2/twl-common.h
@@ -1,6 +1,8 @@
1#ifndef __OMAP_PMIC_COMMON__ 1#ifndef __OMAP_PMIC_COMMON__
2#define __OMAP_PMIC_COMMON__ 2#define __OMAP_PMIC_COMMON__
3 3
4#include <plat/irqs.h>
5
4#define TWL_COMMON_PDATA_USB (1 << 0) 6#define TWL_COMMON_PDATA_USB (1 << 0)
5#define TWL_COMMON_PDATA_BCI (1 << 1) 7#define TWL_COMMON_PDATA_BCI (1 << 1)
6#define TWL_COMMON_PDATA_MADC (1 << 2) 8#define TWL_COMMON_PDATA_MADC (1 << 2)
@@ -30,6 +32,7 @@ struct twl4030_platform_data;
30 32
31void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq, 33void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
32 struct twl4030_platform_data *pmic_data); 34 struct twl4030_platform_data *pmic_data);
35void omap_pmic_late_init(void);
33 36
34static inline void omap2_pmic_init(const char *pmic_type, 37static inline void omap2_pmic_init(const char *pmic_type,
35 struct twl4030_platform_data *pmic_data) 38 struct twl4030_platform_data *pmic_data)
diff --git a/arch/arm/mach-picoxcell/include/mach/debug-macro.S b/arch/arm/mach-picoxcell/include/mach/debug-macro.S
index 8f2c234ed9d9..58d4ee3ae949 100644
--- a/arch/arm/mach-picoxcell/include/mach/debug-macro.S
+++ b/arch/arm/mach-picoxcell/include/mach/debug-macro.S
@@ -14,7 +14,7 @@
14 14
15#define UART_SHIFT 2 15#define UART_SHIFT 2
16 16
17 .macro addruart, rp, rv 17 .macro addruart, rp, rv, tmp
18 ldr \rv, =PHYS_TO_IO(PICOXCELL_UART1_BASE) 18 ldr \rv, =PHYS_TO_IO(PICOXCELL_UART1_BASE)
19 ldr \rp, =PICOXCELL_UART1_BASE 19 ldr \rp, =PICOXCELL_UART1_BASE
20 .endm 20 .endm
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index cb53160f6c5d..26ebb57719df 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -9,6 +9,7 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/suspend.h> 10#include <linux/suspend.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/module.h>
12#include <linux/of.h> 13#include <linux/of.h>
13#include <linux/of_address.h> 14#include <linux/of_address.h>
14#include <linux/of_device.h> 15#include <linux/of_device.h>
diff --git a/arch/arm/mach-prima2/prima2.c b/arch/arm/mach-prima2/prima2.c
index ef555c041962..a12b689a8702 100644
--- a/arch/arm/mach-prima2/prima2.c
+++ b/arch/arm/mach-prima2/prima2.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <asm/sizes.h>
11#include <asm/mach-types.h> 12#include <asm/mach-types.h>
12#include <asm/mach/arch.h> 13#include <asm/mach/arch.h>
13#include <linux/of.h> 14#include <linux/of.h>
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index fc0b8544e174..4b81f59a4cba 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -307,7 +307,7 @@ static inline void balloon3_mmc_init(void) {}
307/****************************************************************************** 307/******************************************************************************
308 * USB Gadget 308 * USB Gadget
309 ******************************************************************************/ 309 ******************************************************************************/
310#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE) 310#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
311static void balloon3_udc_command(int cmd) 311static void balloon3_udc_command(int cmd)
312{ 312{
313 if (cmd == PXA2XX_UDC_CMD_CONNECT) 313 if (cmd == PXA2XX_UDC_CMD_CONNECT)
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index 692e1ffc5586..d23b92b80488 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -146,7 +146,7 @@ static void __init colibri_pxa320_init_eth(void)
146static inline void __init colibri_pxa320_init_eth(void) {} 146static inline void __init colibri_pxa320_init_eth(void) {}
147#endif /* CONFIG_AX88796 */ 147#endif /* CONFIG_AX88796 */
148 148
149#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE) 149#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
150static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = { 150static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = {
151 .gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96), 151 .gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96),
152 .gpio_pullup = -1, 152 .gpio_pullup = -1,
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index 9c8208ca0415..ffdd70dad327 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -106,7 +106,7 @@ static void __init gumstix_mmc_init(void)
106} 106}
107#endif 107#endif
108 108
109#ifdef CONFIG_USB_GADGET_PXA25X 109#ifdef CONFIG_USB_PXA25X
110static struct gpio_vbus_mach_info gumstix_udc_info = { 110static struct gpio_vbus_mach_info gumstix_udc_info = {
111 .gpio_vbus = GPIO_GUMSTIX_USB_GPIOn, 111 .gpio_vbus = GPIO_GUMSTIX_USB_GPIOn,
112 .gpio_pullup = GPIO_GUMSTIX_USB_GPIOx, 112 .gpio_pullup = GPIO_GUMSTIX_USB_GPIOx,
diff --git a/arch/arm/mach-pxa/include/mach/palm27x.h b/arch/arm/mach-pxa/include/mach/palm27x.h
index f80bbe246afe..d4eac3d6ffb5 100644
--- a/arch/arm/mach-pxa/include/mach/palm27x.h
+++ b/arch/arm/mach-pxa/include/mach/palm27x.h
@@ -37,8 +37,8 @@ extern void __init palm27x_lcd_init(int power,
37#define palm27x_lcd_init(power, mode) do {} while (0) 37#define palm27x_lcd_init(power, mode) do {} while (0)
38#endif 38#endif
39 39
40#if defined(CONFIG_USB_GADGET_PXA27X) || \ 40#if defined(CONFIG_USB_PXA27X) || \
41 defined(CONFIG_USB_GADGET_PXA27X_MODULE) 41 defined(CONFIG_USB_PXA27X_MODULE)
42extern void __init palm27x_udc_init(int vbus, int pullup, 42extern void __init palm27x_udc_init(int vbus, int pullup,
43 int vbus_inverted); 43 int vbus_inverted);
44#else 44#else
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 325c245c0a0d..fbc10d7b95d1 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -164,8 +164,8 @@ void __init palm27x_lcd_init(int power, struct pxafb_mode_info *mode)
164/****************************************************************************** 164/******************************************************************************
165 * USB Gadget 165 * USB Gadget
166 ******************************************************************************/ 166 ******************************************************************************/
167#if defined(CONFIG_USB_GADGET_PXA27X) || \ 167#if defined(CONFIG_USB_PXA27X) || \
168 defined(CONFIG_USB_GADGET_PXA27X_MODULE) 168 defined(CONFIG_USB_PXA27X_MODULE)
169static struct gpio_vbus_mach_info palm27x_udc_info = { 169static struct gpio_vbus_mach_info palm27x_udc_info = {
170 .gpio_vbus_inverted = 1, 170 .gpio_vbus_inverted = 1,
171}; 171};
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index 6ec7caefb37c..2c24c67fd92b 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -338,7 +338,7 @@ static inline void palmtc_mkp_init(void) {}
338/****************************************************************************** 338/******************************************************************************
339 * UDC 339 * UDC
340 ******************************************************************************/ 340 ******************************************************************************/
341#if defined(CONFIG_USB_GADGET_PXA25X)||defined(CONFIG_USB_GADGET_PXA25X_MODULE) 341#if defined(CONFIG_USB_PXA25X)||defined(CONFIG_USB_PXA25X_MODULE)
342static struct gpio_vbus_mach_info palmtc_udc_info = { 342static struct gpio_vbus_mach_info palmtc_udc_info = {
343 .gpio_vbus = GPIO_NR_PALMTC_USB_DETECT_N, 343 .gpio_vbus = GPIO_NR_PALMTC_USB_DETECT_N,
344 .gpio_vbus_inverted = 1, 344 .gpio_vbus_inverted = 1,
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index a7539a6ed1ff..ca0c6615028c 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -343,7 +343,7 @@ static inline void vpac270_uhc_init(void) {}
343/****************************************************************************** 343/******************************************************************************
344 * USB Gadget 344 * USB Gadget
345 ******************************************************************************/ 345 ******************************************************************************/
346#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE) 346#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
347static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = { 347static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = {
348 .gpio_vbus = GPIO41_VPAC270_UDC_DETECT, 348 .gpio_vbus = GPIO41_VPAC270_UDC_DETECT,
349 .gpio_pullup = -1, 349 .gpio_pullup = -1,
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
index 5e6b42089eb4..3341fd118723 100644
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/arch/arm/mach-s3c64xx/dev-spi.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/export.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/dma-mapping.h> 15#include <linux/dma-mapping.h>
15#include <linux/gpio.h> 16#include <linux/gpio.h>
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 66668565ee75..f208154b1382 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -8,7 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/module.h> 11#include <linux/export.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/i2c.h> 13#include <linux/i2c.h>
14 14
diff --git a/arch/arm/mach-s3c64xx/s3c6400.c b/arch/arm/mach-s3c64xx/s3c6400.c
index 7a3bc32df425..51c00f2453c6 100644
--- a/arch/arm/mach-s3c64xx/s3c6400.c
+++ b/arch/arm/mach-s3c64xx/s3c6400.c
@@ -70,7 +70,7 @@ void __init s3c6400_init_irq(void)
70 s3c64xx_init_irq(~0 & ~(0xf << 5), ~0); 70 s3c64xx_init_irq(~0 & ~(0xf << 5), ~0);
71} 71}
72 72
73struct sysdev_class s3c6400_sysclass = { 73static struct sysdev_class s3c6400_sysclass = {
74 .name = "s3c6400-core", 74 .name = "s3c6400-core",
75}; 75};
76 76
diff --git a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
index 83d2afb79e9f..2cf80026c58d 100644
--- a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
+++ b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
@@ -20,7 +20,7 @@
20#include <plat/fb.h> 20#include <plat/fb.h>
21#include <plat/gpio-cfg.h> 21#include <plat/gpio-cfg.h>
22 22
23extern void s3c64xx_fb_gpio_setup_24bpp(void) 23void s3c64xx_fb_gpio_setup_24bpp(void)
24{ 24{
25 s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2)); 25 s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2));
26 s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2)); 26 s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2));
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index a9106c392398..8662ef6e5681 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -273,6 +273,7 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = {
273 273
274static struct platform_pwm_backlight_data smdkv210_bl_data = { 274static struct platform_pwm_backlight_data smdkv210_bl_data = {
275 .pwm_id = 3, 275 .pwm_id = 3,
276 .pwm_period_ns = 1000,
276}; 277};
277 278
278static void __init smdkv210_map_io(void) 279static void __init smdkv210_map_io(void)
diff --git a/arch/arm/mach-sa1100/Makefile.boot b/arch/arm/mach-sa1100/Makefile.boot
index 5a616f6e5612..f7951aa04562 100644
--- a/arch/arm/mach-sa1100/Makefile.boot
+++ b/arch/arm/mach-sa1100/Makefile.boot
@@ -1,5 +1,5 @@
1ifeq ($(CONFIG_ARCH_SA1100),y) 1ifeq ($(CONFIG_SA1111),y)
2 zreladdr-$(CONFIG_SA1111) += 0xc0208000 2 zreladdr-y += 0xc0208000
3else 3else
4 zreladdr-y += 0xc0008000 4 zreladdr-y += 0xc0008000
5endif 5endif
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 2aec2f732515..737bdc631b0d 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5# Common objects 5# Common objects
6obj-y := timer.o console.o clock.o pm_runtime.o 6obj-y := timer.o console.o clock.o
7 7
8# CPU objects 8# CPU objects
9obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o 9obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 83624e26b884..b862e9f81e3e 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -515,14 +515,14 @@ static void __init ag5evm_init(void)
515 /* enable MMCIF */ 515 /* enable MMCIF */
516 gpio_request(GPIO_FN_MMCCLK0, NULL); 516 gpio_request(GPIO_FN_MMCCLK0, NULL);
517 gpio_request(GPIO_FN_MMCCMD0_PU, NULL); 517 gpio_request(GPIO_FN_MMCCMD0_PU, NULL);
518 gpio_request(GPIO_FN_MMCD0_0, NULL); 518 gpio_request(GPIO_FN_MMCD0_0_PU, NULL);
519 gpio_request(GPIO_FN_MMCD0_1, NULL); 519 gpio_request(GPIO_FN_MMCD0_1_PU, NULL);
520 gpio_request(GPIO_FN_MMCD0_2, NULL); 520 gpio_request(GPIO_FN_MMCD0_2_PU, NULL);
521 gpio_request(GPIO_FN_MMCD0_3, NULL); 521 gpio_request(GPIO_FN_MMCD0_3_PU, NULL);
522 gpio_request(GPIO_FN_MMCD0_4, NULL); 522 gpio_request(GPIO_FN_MMCD0_4_PU, NULL);
523 gpio_request(GPIO_FN_MMCD0_5, NULL); 523 gpio_request(GPIO_FN_MMCD0_5_PU, NULL);
524 gpio_request(GPIO_FN_MMCD0_6, NULL); 524 gpio_request(GPIO_FN_MMCD0_6_PU, NULL);
525 gpio_request(GPIO_FN_MMCD0_7, NULL); 525 gpio_request(GPIO_FN_MMCD0_7_PU, NULL);
526 gpio_request(GPIO_PORT208, NULL); /* Reset */ 526 gpio_request(GPIO_PORT208, NULL); /* Reset */
527 gpio_direction_output(GPIO_PORT208, 1); 527 gpio_direction_output(GPIO_PORT208, 1);
528 528
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index a3aa0f6df964..4c865ece9ac4 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -201,7 +201,7 @@ static struct physmap_flash_data nor_flash_data = {
201static struct resource nor_flash_resources[] = { 201static struct resource nor_flash_resources[] = {
202 [0] = { 202 [0] = {
203 .start = 0x20000000, /* CS0 shadow instead of regular CS0 */ 203 .start = 0x20000000, /* CS0 shadow instead of regular CS0 */
204 .end = 0x28000000 - 1, /* needed by USB MASK ROM boot */ 204 .end = 0x28000000 - 1, /* needed by USB MASK ROM boot */
205 .flags = IORESOURCE_MEM, 205 .flags = IORESOURCE_MEM,
206 } 206 }
207}; 207};
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
index adc73122bf20..bd9a78424d6b 100644
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ b/arch/arm/mach-shmobile/board-kota2.c
@@ -48,6 +48,7 @@
48#include <asm/hardware/cache-l2x0.h> 48#include <asm/hardware/cache-l2x0.h>
49#include <asm/traps.h> 49#include <asm/traps.h>
50 50
51/* SMSC 9220 */
51static struct resource smsc9220_resources[] = { 52static struct resource smsc9220_resources[] = {
52 [0] = { 53 [0] = {
53 .start = 0x14000000, /* CS5A */ 54 .start = 0x14000000, /* CS5A */
@@ -77,6 +78,7 @@ static struct platform_device eth_device = {
77 .num_resources = ARRAY_SIZE(smsc9220_resources), 78 .num_resources = ARRAY_SIZE(smsc9220_resources),
78}; 79};
79 80
81/* KEYSC */
80static struct sh_keysc_info keysc_platdata = { 82static struct sh_keysc_info keysc_platdata = {
81 .mode = SH_KEYSC_MODE_6, 83 .mode = SH_KEYSC_MODE_6,
82 .scan_timing = 3, 84 .scan_timing = 3,
@@ -120,6 +122,7 @@ static struct platform_device keysc_device = {
120 }, 122 },
121}; 123};
122 124
125/* GPIO KEY */
123#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 } 126#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
124 127
125static struct gpio_keys_button gpio_buttons[] = { 128static struct gpio_keys_button gpio_buttons[] = {
@@ -150,6 +153,7 @@ static struct platform_device gpio_keys_device = {
150 }, 153 },
151}; 154};
152 155
156/* GPIO LED */
153#define GPIO_LED(n, g) { .name = n, .gpio = g } 157#define GPIO_LED(n, g) { .name = n, .gpio = g }
154 158
155static struct gpio_led gpio_leds[] = { 159static struct gpio_led gpio_leds[] = {
@@ -175,6 +179,7 @@ static struct platform_device gpio_leds_device = {
175 }, 179 },
176}; 180};
177 181
182/* MMCIF */
178static struct resource mmcif_resources[] = { 183static struct resource mmcif_resources[] = {
179 [0] = { 184 [0] = {
180 .name = "MMCIF", 185 .name = "MMCIF",
@@ -207,6 +212,7 @@ static struct platform_device mmcif_device = {
207 .resource = mmcif_resources, 212 .resource = mmcif_resources,
208}; 213};
209 214
215/* SDHI0 */
210static struct sh_mobile_sdhi_info sdhi0_info = { 216static struct sh_mobile_sdhi_info sdhi0_info = {
211 .tmio_caps = MMC_CAP_SD_HIGHSPEED, 217 .tmio_caps = MMC_CAP_SD_HIGHSPEED,
212 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT, 218 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
@@ -243,6 +249,7 @@ static struct platform_device sdhi0_device = {
243 }, 249 },
244}; 250};
245 251
252/* SDHI1 */
246static struct sh_mobile_sdhi_info sdhi1_info = { 253static struct sh_mobile_sdhi_info sdhi1_info = {
247 .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ, 254 .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
248 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT, 255 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 66975921e646..995a9c3aec8f 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -476,7 +476,7 @@ static struct clk_ops fsidiv_clk_ops = {
476 .disable = fsidiv_disable, 476 .disable = fsidiv_disable,
477}; 477};
478 478
479static struct clk_mapping sh7372_fsidiva_clk_mapping = { 479static struct clk_mapping fsidiva_clk_mapping = {
480 .phys = FSIDIVA, 480 .phys = FSIDIVA,
481 .len = 8, 481 .len = 8,
482}; 482};
@@ -484,10 +484,10 @@ static struct clk_mapping sh7372_fsidiva_clk_mapping = {
484struct clk sh7372_fsidiva_clk = { 484struct clk sh7372_fsidiva_clk = {
485 .ops = &fsidiv_clk_ops, 485 .ops = &fsidiv_clk_ops,
486 .parent = &div6_reparent_clks[DIV6_FSIA], /* late install */ 486 .parent = &div6_reparent_clks[DIV6_FSIA], /* late install */
487 .mapping = &sh7372_fsidiva_clk_mapping, 487 .mapping = &fsidiva_clk_mapping,
488}; 488};
489 489
490static struct clk_mapping sh7372_fsidivb_clk_mapping = { 490static struct clk_mapping fsidivb_clk_mapping = {
491 .phys = FSIDIVB, 491 .phys = FSIDIVB,
492 .len = 8, 492 .len = 8,
493}; 493};
@@ -495,7 +495,7 @@ static struct clk_mapping sh7372_fsidivb_clk_mapping = {
495struct clk sh7372_fsidivb_clk = { 495struct clk sh7372_fsidivb_clk = {
496 .ops = &fsidiv_clk_ops, 496 .ops = &fsidiv_clk_ops,
497 .parent = &div6_reparent_clks[DIV6_FSIB], /* late install */ 497 .parent = &div6_reparent_clks[DIV6_FSIB], /* late install */
498 .mapping = &sh7372_fsidivb_clk_mapping, 498 .mapping = &fsidivb_clk_mapping,
499}; 499};
500 500
501static struct clk *late_main_clks[] = { 501static struct clk *late_main_clks[] = {
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
index 2e44f11f592e..1b2334277e85 100644
--- a/arch/arm/mach-shmobile/cpuidle.c
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -26,65 +26,59 @@ void (*shmobile_cpuidle_modes[CPUIDLE_STATE_MAX])(void) = {
26}; 26};
27 27
28static int shmobile_cpuidle_enter(struct cpuidle_device *dev, 28static int shmobile_cpuidle_enter(struct cpuidle_device *dev,
29 struct cpuidle_state *state) 29 struct cpuidle_driver *drv,
30 int index)
30{ 31{
31 ktime_t before, after; 32 ktime_t before, after;
32 int requested_state = state - &dev->states[0];
33 33
34 dev->last_state = &dev->states[requested_state];
35 before = ktime_get(); 34 before = ktime_get();
36 35
37 local_irq_disable(); 36 local_irq_disable();
38 local_fiq_disable(); 37 local_fiq_disable();
39 38
40 shmobile_cpuidle_modes[requested_state](); 39 shmobile_cpuidle_modes[index]();
41 40
42 local_irq_enable(); 41 local_irq_enable();
43 local_fiq_enable(); 42 local_fiq_enable();
44 43
45 after = ktime_get(); 44 after = ktime_get();
46 return ktime_to_ns(ktime_sub(after, before)) >> 10; 45 dev->last_residency = ktime_to_ns(ktime_sub(after, before)) >> 10;
46
47 return index;
47} 48}
48 49
49static struct cpuidle_device shmobile_cpuidle_dev; 50static struct cpuidle_device shmobile_cpuidle_dev;
50static struct cpuidle_driver shmobile_cpuidle_driver = { 51static struct cpuidle_driver shmobile_cpuidle_driver = {
51 .name = "shmobile_cpuidle", 52 .name = "shmobile_cpuidle",
52 .owner = THIS_MODULE, 53 .owner = THIS_MODULE,
54 .states[0] = {
55 .name = "C1",
56 .desc = "WFI",
57 .exit_latency = 1,
58 .target_residency = 1 * 2,
59 .flags = CPUIDLE_FLAG_TIME_VALID,
60 },
61 .safe_state_index = 0, /* C1 */
62 .state_count = 1,
53}; 63};
54 64
55void (*shmobile_cpuidle_setup)(struct cpuidle_device *dev); 65void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
56 66
57static int shmobile_cpuidle_init(void) 67static int shmobile_cpuidle_init(void)
58{ 68{
59 struct cpuidle_device *dev = &shmobile_cpuidle_dev; 69 struct cpuidle_device *dev = &shmobile_cpuidle_dev;
60 struct cpuidle_state *state; 70 struct cpuidle_driver *drv = &shmobile_cpuidle_driver;
61 int i; 71 int i;
62 72
63 cpuidle_register_driver(&shmobile_cpuidle_driver); 73 for (i = 0; i < CPUIDLE_STATE_MAX; i++)
64 74 drv->states[i].enter = shmobile_cpuidle_enter;
65 for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
66 dev->states[i].name[0] = '\0';
67 dev->states[i].desc[0] = '\0';
68 dev->states[i].enter = shmobile_cpuidle_enter;
69 }
70
71 i = CPUIDLE_DRIVER_STATE_START;
72
73 state = &dev->states[i++];
74 snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
75 strncpy(state->desc, "WFI", CPUIDLE_DESC_LEN);
76 state->exit_latency = 1;
77 state->target_residency = 1 * 2;
78 state->power_usage = 3;
79 state->flags = 0;
80 state->flags |= CPUIDLE_FLAG_TIME_VALID;
81
82 dev->safe_state = state;
83 dev->state_count = i;
84 75
85 if (shmobile_cpuidle_setup) 76 if (shmobile_cpuidle_setup)
86 shmobile_cpuidle_setup(dev); 77 shmobile_cpuidle_setup(drv);
78
79 cpuidle_register_driver(drv);
87 80
81 dev->state_count = drv->state_count;
88 cpuidle_register_device(dev); 82 cpuidle_register_device(dev);
89 83
90 return 0; 84 return 0;
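
The cpuidle.c conversion above follows the 3.2 cpuidle API split between struct cpuidle_driver, which now owns the state table and its parameters, and struct cpuidle_device: the .enter callback takes (dev, drv, index), returns the index of the state actually entered, and reports the measured time through dev->last_residency instead of its return value. A stripped-down sketch of the same pattern, with a stub in place of the platform's low-power entry code:

    #include <linux/cpuidle.h>
    #include <linux/ktime.h>
    #include <linux/module.h>

    /* Illustrative single-state driver using the driver-centric API the
     * hunk above converts to.  example_idle() stands in for
     * shmobile_cpuidle_modes[index]; interrupt masking is omitted. */
    static void example_idle(void)
    {
    	/* platform low-power entry (e.g. WFI) would go here */
    }

    static int example_enter(struct cpuidle_device *dev,
    			 struct cpuidle_driver *drv, int index)
    {
    	ktime_t before = ktime_get();

    	example_idle();

    	/* residency is reported via the device, not the return value */
    	dev->last_residency =
    		ktime_to_ns(ktime_sub(ktime_get(), before)) >> 10;

    	return index;	/* index of the state that was entered */
    }

    static struct cpuidle_driver example_cpuidle_driver = {
    	.name		= "example_cpuidle",
    	.owner		= THIS_MODULE,
    	.states[0]	= {
    		.name			= "C1",
    		.desc			= "WFI",
    		.exit_latency		= 1,
    		.target_residency	= 2,
    		.flags			= CPUIDLE_FLAG_TIME_VALID,
    		.enter			= example_enter,
    	},
    	.safe_state_index	= 0,
    	.state_count		= 1,
    };

Registration order then matches the hunk: cpuidle_register_driver() first, then a cpuidle_device whose state_count is copied from the driver before cpuidle_register_device().
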
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index c0cdbf997c91..834bd6cd508f 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -9,9 +9,9 @@ extern int clk_init(void);
9extern void shmobile_handle_irq_intc(struct pt_regs *); 9extern void shmobile_handle_irq_intc(struct pt_regs *);
10extern void shmobile_handle_irq_gic(struct pt_regs *); 10extern void shmobile_handle_irq_gic(struct pt_regs *);
11extern struct platform_suspend_ops shmobile_suspend_ops; 11extern struct platform_suspend_ops shmobile_suspend_ops;
12struct cpuidle_device; 12struct cpuidle_driver;
13extern void (*shmobile_cpuidle_modes[])(void); 13extern void (*shmobile_cpuidle_modes[])(void);
14extern void (*shmobile_cpuidle_setup)(struct cpuidle_device *dev); 14extern void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
15 15
16extern void sh7367_init_irq(void); 16extern void sh7367_init_irq(void);
17extern void sh7367_add_early_devices(void); 17extern void sh7367_add_early_devices(void);
diff --git a/arch/arm/mach-shmobile/include/mach/sh73a0.h b/arch/arm/mach-shmobile/include/mach/sh73a0.h
index 18ae6a990bc2..881d515a9686 100644
--- a/arch/arm/mach-shmobile/include/mach/sh73a0.h
+++ b/arch/arm/mach-shmobile/include/mach/sh73a0.h
@@ -470,6 +470,14 @@ enum {
470 GPIO_FN_SDHICMD2_PU, 470 GPIO_FN_SDHICMD2_PU,
471 GPIO_FN_MMCCMD0_PU, 471 GPIO_FN_MMCCMD0_PU,
472 GPIO_FN_MMCCMD1_PU, 472 GPIO_FN_MMCCMD1_PU,
473 GPIO_FN_MMCD0_0_PU,
474 GPIO_FN_MMCD0_1_PU,
475 GPIO_FN_MMCD0_2_PU,
476 GPIO_FN_MMCD0_3_PU,
477 GPIO_FN_MMCD0_4_PU,
478 GPIO_FN_MMCD0_5_PU,
479 GPIO_FN_MMCD0_6_PU,
480 GPIO_FN_MMCD0_7_PU,
473 GPIO_FN_FSIACK_PU, 481 GPIO_FN_FSIACK_PU,
474 GPIO_FN_FSIAILR_PU, 482 GPIO_FN_FSIAILR_PU,
475 GPIO_FN_FSIAIBT_PU, 483 GPIO_FN_FSIAIBT_PU,
diff --git a/arch/arm/mach-shmobile/pfc-sh7367.c b/arch/arm/mach-shmobile/pfc-sh7367.c
index 128555e76e43..e6e524654e67 100644
--- a/arch/arm/mach-shmobile/pfc-sh7367.c
+++ b/arch/arm/mach-shmobile/pfc-sh7367.c
@@ -21,68 +21,49 @@
21#include <linux/gpio.h> 21#include <linux/gpio.h>
22#include <mach/sh7367.h> 22#include <mach/sh7367.h>
23 23
24#define _1(fn, pfx, sfx) fn(pfx, sfx) 24#define CPU_ALL_PORT(fn, pfx, sfx) \
25 25 PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
26#define _10(fn, pfx, sfx) \ 26 PORT_10(fn, pfx##10, sfx), PORT_90(fn, pfx##1, sfx), \
27 _1(fn, pfx##0, sfx), _1(fn, pfx##1, sfx), \ 27 PORT_10(fn, pfx##20, sfx), PORT_10(fn, pfx##21, sfx), \
28 _1(fn, pfx##2, sfx), _1(fn, pfx##3, sfx), \ 28 PORT_10(fn, pfx##22, sfx), PORT_10(fn, pfx##23, sfx), \
29 _1(fn, pfx##4, sfx), _1(fn, pfx##5, sfx), \ 29 PORT_10(fn, pfx##24, sfx), PORT_10(fn, pfx##25, sfx), \
30 _1(fn, pfx##6, sfx), _1(fn, pfx##7, sfx), \ 30 PORT_10(fn, pfx##26, sfx), PORT_1(fn, pfx##270, sfx), \
31 _1(fn, pfx##8, sfx), _1(fn, pfx##9, sfx) 31 PORT_1(fn, pfx##271, sfx), PORT_1(fn, pfx##272, sfx)
32
33#define _90(fn, pfx, sfx) \
34 _10(fn, pfx##1, sfx), _10(fn, pfx##2, sfx), \
35 _10(fn, pfx##3, sfx), _10(fn, pfx##4, sfx), \
36 _10(fn, pfx##5, sfx), _10(fn, pfx##6, sfx), \
37 _10(fn, pfx##7, sfx), _10(fn, pfx##8, sfx), \
38 _10(fn, pfx##9, sfx)
39
40#define _273(fn, pfx, sfx) \
41 _10(fn, pfx, sfx), _90(fn, pfx, sfx), \
42 _10(fn, pfx##10, sfx), _90(fn, pfx##1, sfx), \
43 _10(fn, pfx##20, sfx), _10(fn, pfx##21, sfx), \
44 _10(fn, pfx##22, sfx), _10(fn, pfx##23, sfx), \
45 _10(fn, pfx##24, sfx), _10(fn, pfx##25, sfx), \
46 _10(fn, pfx##26, sfx), _1(fn, pfx##270, sfx), \
47 _1(fn, pfx##271, sfx), _1(fn, pfx##272, sfx)
48
49#define _PORT(pfx, sfx) pfx##_##sfx
50#define PORT_273(str) _273(_PORT, PORT, str)
51 32
52enum { 33enum {
53 PINMUX_RESERVED = 0, 34 PINMUX_RESERVED = 0,
54 35
55 PINMUX_DATA_BEGIN, 36 PINMUX_DATA_BEGIN,
56 PORT_273(DATA), /* PORT0_DATA -> PORT272_DATA */ 37 PORT_ALL(DATA), /* PORT0_DATA -> PORT272_DATA */
57 PINMUX_DATA_END, 38 PINMUX_DATA_END,
58 39
59 PINMUX_INPUT_BEGIN, 40 PINMUX_INPUT_BEGIN,
60 PORT_273(IN), /* PORT0_IN -> PORT272_IN */ 41 PORT_ALL(IN), /* PORT0_IN -> PORT272_IN */
61 PINMUX_INPUT_END, 42 PINMUX_INPUT_END,
62 43
63 PINMUX_INPUT_PULLUP_BEGIN, 44 PINMUX_INPUT_PULLUP_BEGIN,
64 PORT_273(IN_PU), /* PORT0_IN_PU -> PORT272_IN_PU */ 45 PORT_ALL(IN_PU), /* PORT0_IN_PU -> PORT272_IN_PU */
65 PINMUX_INPUT_PULLUP_END, 46 PINMUX_INPUT_PULLUP_END,
66 47
67 PINMUX_INPUT_PULLDOWN_BEGIN, 48 PINMUX_INPUT_PULLDOWN_BEGIN,
68 PORT_273(IN_PD), /* PORT0_IN_PD -> PORT272_IN_PD */ 49 PORT_ALL(IN_PD), /* PORT0_IN_PD -> PORT272_IN_PD */
69 PINMUX_INPUT_PULLDOWN_END, 50 PINMUX_INPUT_PULLDOWN_END,
70 51
71 PINMUX_OUTPUT_BEGIN, 52 PINMUX_OUTPUT_BEGIN,
72 PORT_273(OUT), /* PORT0_OUT -> PORT272_OUT */ 53 PORT_ALL(OUT), /* PORT0_OUT -> PORT272_OUT */
73 PINMUX_OUTPUT_END, 54 PINMUX_OUTPUT_END,
74 55
75 PINMUX_FUNCTION_BEGIN, 56 PINMUX_FUNCTION_BEGIN,
76 PORT_273(FN_IN), /* PORT0_FN_IN -> PORT272_FN_IN */ 57 PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT272_FN_IN */
77 PORT_273(FN_OUT), /* PORT0_FN_OUT -> PORT272_FN_OUT */ 58 PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT272_FN_OUT */
78 PORT_273(FN0), /* PORT0_FN0 -> PORT272_FN0 */ 59 PORT_ALL(FN0), /* PORT0_FN0 -> PORT272_FN0 */
79 PORT_273(FN1), /* PORT0_FN1 -> PORT272_FN1 */ 60 PORT_ALL(FN1), /* PORT0_FN1 -> PORT272_FN1 */
80 PORT_273(FN2), /* PORT0_FN2 -> PORT272_FN2 */ 61 PORT_ALL(FN2), /* PORT0_FN2 -> PORT272_FN2 */
81 PORT_273(FN3), /* PORT0_FN3 -> PORT272_FN3 */ 62 PORT_ALL(FN3), /* PORT0_FN3 -> PORT272_FN3 */
82 PORT_273(FN4), /* PORT0_FN4 -> PORT272_FN4 */ 63 PORT_ALL(FN4), /* PORT0_FN4 -> PORT272_FN4 */
83 PORT_273(FN5), /* PORT0_FN5 -> PORT272_FN5 */ 64 PORT_ALL(FN5), /* PORT0_FN5 -> PORT272_FN5 */
84 PORT_273(FN6), /* PORT0_FN6 -> PORT272_FN6 */ 65 PORT_ALL(FN6), /* PORT0_FN6 -> PORT272_FN6 */
85 PORT_273(FN7), /* PORT0_FN7 -> PORT272_FN7 */ 66 PORT_ALL(FN7), /* PORT0_FN7 -> PORT272_FN7 */
86 67
87 MSELBCR_MSEL2_1, MSELBCR_MSEL2_0, 68 MSELBCR_MSEL2_1, MSELBCR_MSEL2_0,
88 PINMUX_FUNCTION_END, 69 PINMUX_FUNCTION_END,
@@ -327,41 +308,6 @@ enum {
327 PINMUX_MARK_END, 308 PINMUX_MARK_END,
328}; 309};
329 310
330#define PORT_DATA_I(nr) \
331 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN)
332
333#define PORT_DATA_I_PD(nr) \
334 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
335 PORT##nr##_IN, PORT##nr##_IN_PD)
336
337#define PORT_DATA_I_PU(nr) \
338 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
339 PORT##nr##_IN, PORT##nr##_IN_PU)
340
341#define PORT_DATA_I_PU_PD(nr) \
342 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
343 PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
344
345#define PORT_DATA_O(nr) \
346 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT)
347
348#define PORT_DATA_IO(nr) \
349 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
350 PORT##nr##_IN)
351
352#define PORT_DATA_IO_PD(nr) \
353 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
354 PORT##nr##_IN, PORT##nr##_IN_PD)
355
356#define PORT_DATA_IO_PU(nr) \
357 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
358 PORT##nr##_IN, PORT##nr##_IN_PU)
359
360#define PORT_DATA_IO_PU_PD(nr) \
361 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
362 PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
363
364
365static pinmux_enum_t pinmux_data[] = { 311static pinmux_enum_t pinmux_data[] = {
366 312
367 /* specify valid pin states for each pin in GPIO mode */ 313 /* specify valid pin states for each pin in GPIO mode */
@@ -1098,13 +1044,9 @@ static pinmux_enum_t pinmux_data[] = {
1098 PINMUX_DATA(DIVLOCK_MARK, PORT272_FN1), 1044 PINMUX_DATA(DIVLOCK_MARK, PORT272_FN1),
1099}; 1045};
1100 1046
1101#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
1102#define GPIO_PORT_273() _273(_GPIO_PORT, , unused)
1103#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
1104
1105static struct pinmux_gpio pinmux_gpios[] = { 1047static struct pinmux_gpio pinmux_gpios[] = {
1106 /* 49-1 -> 49-6 (GPIO) */ 1048 /* 49-1 -> 49-6 (GPIO) */
1107 GPIO_PORT_273(), 1049 GPIO_PORT_ALL(),
1108 1050
1109 /* Special Pull-up / Pull-down Functions */ 1051 /* Special Pull-up / Pull-down Functions */
1110 GPIO_FN(PORT48_KEYIN0_PU), GPIO_FN(PORT49_KEYIN1_PU), 1052 GPIO_FN(PORT48_KEYIN0_PU), GPIO_FN(PORT49_KEYIN1_PU),
@@ -1345,22 +1287,6 @@ static struct pinmux_gpio pinmux_gpios[] = {
1345 GPIO_FN(DIVLOCK), 1287 GPIO_FN(DIVLOCK),
1346}; 1288};
1347 1289
1348/* helper for top 4 bits in PORTnCR */
1349#define PCRH(in, in_pd, in_pu, out) \
1350 0, (out), (in), 0, \
1351 0, 0, 0, 0, \
1352 0, 0, (in_pd), 0, \
1353 0, 0, (in_pu), 0
1354
1355#define PORTCR(nr, reg) \
1356 { PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
1357 PCRH(PORT##nr##_IN, PORT##nr##_IN_PD, \
1358 PORT##nr##_IN_PU, PORT##nr##_OUT), \
1359 PORT##nr##_FN0, PORT##nr##_FN1, PORT##nr##_FN2, \
1360 PORT##nr##_FN3, PORT##nr##_FN4, PORT##nr##_FN5, \
1361 PORT##nr##_FN6, PORT##nr##_FN7 } \
1362 }
1363
1364static struct pinmux_cfg_reg pinmux_config_regs[] = { 1290static struct pinmux_cfg_reg pinmux_config_regs[] = {
1365 PORTCR(0, 0xe6050000), /* PORT0CR */ 1291 PORTCR(0, 0xe6050000), /* PORT0CR */
1366 PORTCR(1, 0xe6050001), /* PORT1CR */ 1292 PORTCR(1, 0xe6050001), /* PORT1CR */
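
The pfc-sh7367.c hunk above (and the three PFC hunks that follow) drop the per-SoC _1/_10/_90/_273 token-pasting helpers, the local PORT_DATA_*() state macros, GPIO_PORT_*()/GPIO_FN() and PORTCR() in favour of shared definitions; each SoC now only supplies CPU_ALL_PORT() describing which port numbers it actually has. A simplified sketch of how that expansion works — the real helpers live in a common sh-mobile pinmux header, and the versions below are truncated for illustration:

    /* Generic one-port and ten-port expanders (shared between SoCs): */
    #define PORT_1(fn, pfx, sfx)	fn(pfx, sfx)

    #define PORT_10(fn, pfx, sfx)					\
    	PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx),		\
    	PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx),		\
    	PORT_1(fn, pfx##4, sfx), PORT_1(fn, pfx##5, sfx),		\
    	PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx),		\
    	PORT_1(fn, pfx##8, sfx), PORT_1(fn, pfx##9, sfx)

    /* Per-SoC port list (truncated to ports 0..19 for the example): */
    #define CPU_ALL_PORT(fn, pfx, sfx)					\
    	PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx)

    /* Shared wrappers then stamp out one identifier per port: */
    #define _PORT(pfx, sfx)		pfx##_##sfx
    #define PORT_ALL(str)		CPU_ALL_PORT(_PORT, PORT, str)

    /* PORT_ALL(DATA)  => PORT0_DATA, PORT1_DATA, ..., PORT19_DATA
     * PORT_ALL(IN_PU) => PORT0_IN_PU, ..., PORT19_IN_PU, and so on,
     * which is exactly how the enums above are populated. */
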
diff --git a/arch/arm/mach-shmobile/pfc-sh7372.c b/arch/arm/mach-shmobile/pfc-sh7372.c
index 9c265dae138a..1bd6585a6acf 100644
--- a/arch/arm/mach-shmobile/pfc-sh7372.c
+++ b/arch/arm/mach-shmobile/pfc-sh7372.c
@@ -25,27 +25,13 @@
25#include <linux/gpio.h> 25#include <linux/gpio.h>
26#include <mach/sh7372.h> 26#include <mach/sh7372.h>
27 27
28#define _1(fn, pfx, sfx) fn(pfx, sfx) 28#define CPU_ALL_PORT(fn, pfx, sfx) \
29 29 PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
30#define _10(fn, pfx, sfx) \ 30 PORT_10(fn, pfx##10, sfx), PORT_10(fn, pfx##11, sfx), \
31 _1(fn, pfx##0, sfx), _1(fn, pfx##1, sfx), \ 31 PORT_10(fn, pfx##12, sfx), PORT_10(fn, pfx##13, sfx), \
32 _1(fn, pfx##2, sfx), _1(fn, pfx##3, sfx), \ 32 PORT_10(fn, pfx##14, sfx), PORT_10(fn, pfx##15, sfx), \
33 _1(fn, pfx##4, sfx), _1(fn, pfx##5, sfx), \ 33 PORT_10(fn, pfx##16, sfx), PORT_10(fn, pfx##17, sfx), \
34 _1(fn, pfx##6, sfx), _1(fn, pfx##7, sfx), \ 34 PORT_10(fn, pfx##18, sfx), PORT_1(fn, pfx##190, sfx)
35 _1(fn, pfx##8, sfx), _1(fn, pfx##9, sfx)
36
37#define _80(fn, pfx, sfx) \
38 _10(fn, pfx##1, sfx), _10(fn, pfx##2, sfx), \
39 _10(fn, pfx##3, sfx), _10(fn, pfx##4, sfx), \
40 _10(fn, pfx##5, sfx), _10(fn, pfx##6, sfx), \
41 _10(fn, pfx##7, sfx), _10(fn, pfx##8, sfx)
42
43#define _190(fn, pfx, sfx) \
44 _10(fn, pfx, sfx), _80(fn, pfx, sfx), _10(fn, pfx##9, sfx), \
45 _10(fn, pfx##10, sfx), _80(fn, pfx##1, sfx), _1(fn, pfx##190, sfx)
46
47#define _PORT(pfx, sfx) pfx##_##sfx
48#define PORT_ALL(str) _190(_PORT, PORT, str)
49 35
50enum { 36enum {
51 PINMUX_RESERVED = 0, 37 PINMUX_RESERVED = 0,
@@ -381,108 +367,124 @@ enum {
381 PINMUX_MARK_END, 367 PINMUX_MARK_END,
382}; 368};
383 369
384/* PORT_DATA_I_PD(nr) */
385#define _I___D(nr) \
386 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
387 PORT##nr##_IN, PORT##nr##_IN_PD)
388
389/* PORT_DATA_I_PU(nr) */
390#define _I__U_(nr) \
391 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
392 PORT##nr##_IN, PORT##nr##_IN_PU)
393
394/* PORT_DATA_I_PU_PD(nr) */
395#define _I__UD(nr) \
396 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
397 PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
398
399/* PORT_DATA_O(nr) */
400#define __O___(nr) \
401 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT)
402
403/* PORT_DATA_IO(nr) */
404#define _IO___(nr) \
405 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
406 PORT##nr##_IN)
407
408/* PORT_DATA_IO_PD(nr) */
409#define _IO__D(nr) \
410 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
411 PORT##nr##_IN, PORT##nr##_IN_PD)
412
413/* PORT_DATA_IO_PU(nr) */
414#define _IO_U_(nr) \
415 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
416 PORT##nr##_IN, PORT##nr##_IN_PU)
417
418/* PORT_DATA_IO_PU_PD(nr) */
419#define _IO_UD(nr) \
420 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
421 PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
422
423
424static pinmux_enum_t pinmux_data[] = { 370static pinmux_enum_t pinmux_data[] = {
425 371
426 /* specify valid pin states for each pin in GPIO mode */ 372 /* specify valid pin states for each pin in GPIO mode */
427 373 PORT_DATA_IO_PD(0), PORT_DATA_IO_PD(1),
428 _IO__D(0), _IO__D(1), __O___(2), _I___D(3), _I___D(4), 374 PORT_DATA_O(2), PORT_DATA_I_PD(3),
429 _I___D(5), _IO_UD(6), _I___D(7), _IO__D(8), __O___(9), 375 PORT_DATA_I_PD(4), PORT_DATA_I_PD(5),
430 376 PORT_DATA_IO_PU_PD(6), PORT_DATA_I_PD(7),
431 __O___(10), __O___(11), _IO_UD(12), _IO__D(13), _IO__D(14), 377 PORT_DATA_IO_PD(8), PORT_DATA_O(9),
432 __O___(15), _IO__D(16), _IO__D(17), _I___D(18), _IO___(19), 378
433 379 PORT_DATA_O(10), PORT_DATA_O(11),
434 _IO___(20), _IO___(21), _IO___(22), _IO___(23), _IO___(24), 380 PORT_DATA_IO_PU_PD(12), PORT_DATA_IO_PD(13),
435 _IO___(25), _IO___(26), _IO___(27), _IO___(28), _IO___(29), 381 PORT_DATA_IO_PD(14), PORT_DATA_O(15),
436 382 PORT_DATA_IO_PD(16), PORT_DATA_IO_PD(17),
437 _IO___(30), _IO___(31), _IO___(32), _IO___(33), _IO___(34), 383 PORT_DATA_I_PD(18), PORT_DATA_IO(19),
438 _IO___(35), _IO___(36), _IO___(37), _IO___(38), _IO___(39), 384
439 385 PORT_DATA_IO(20), PORT_DATA_IO(21),
440 _IO___(40), _IO___(41), _IO___(42), _IO___(43), _IO___(44), 386 PORT_DATA_IO(22), PORT_DATA_IO(23),
441 _IO___(45), _IO_U_(46), _IO_U_(47), _IO_U_(48), _IO_U_(49), 387 PORT_DATA_IO(24), PORT_DATA_IO(25),
442 388 PORT_DATA_IO(26), PORT_DATA_IO(27),
443 _IO_U_(50), _IO_U_(51), _IO_U_(52), _IO_U_(53), _IO_U_(54), 389 PORT_DATA_IO(28), PORT_DATA_IO(29),
444 _IO_U_(55), _IO_U_(56), _IO_U_(57), _IO_U_(58), _IO_U_(59), 390
445 391 PORT_DATA_IO(30), PORT_DATA_IO(31),
446 _IO_U_(60), _IO_U_(61), _IO___(62), __O___(63), __O___(64), 392 PORT_DATA_IO(32), PORT_DATA_IO(33),
447 _IO_U_(65), __O___(66), _IO_U_(67), __O___(68), _IO___(69), /*66?*/ 393 PORT_DATA_IO(34), PORT_DATA_IO(35),
448 394 PORT_DATA_IO(36), PORT_DATA_IO(37),
449 _IO___(70), _IO___(71), __O___(72), _I__U_(73), _I__UD(74), 395 PORT_DATA_IO(38), PORT_DATA_IO(39),
450 _IO_UD(75), _IO_UD(76), _IO_UD(77), _IO_UD(78), _IO_UD(79), 396
451 397 PORT_DATA_IO(40), PORT_DATA_IO(41),
452 _IO_UD(80), _IO_UD(81), _IO_UD(82), _IO_UD(83), _IO_UD(84), 398 PORT_DATA_IO(42), PORT_DATA_IO(43),
453 _IO_UD(85), _IO_UD(86), _IO_UD(87), _IO_UD(88), _IO_UD(89), 399 PORT_DATA_IO(44), PORT_DATA_IO(45),
454 400 PORT_DATA_IO_PU(46), PORT_DATA_IO_PU(47),
455 _IO_UD(90), _IO_UD(91), _IO_UD(92), _IO_UD(93), _IO_UD(94), 401 PORT_DATA_IO_PU(48), PORT_DATA_IO_PU(49),
456 _IO_UD(95), _IO_U_(96), _IO_UD(97), _IO_UD(98), __O___(99), /*99?*/ 402
457 403 PORT_DATA_IO_PU(50), PORT_DATA_IO_PU(51),
458 _IO__D(100), _IO__D(101), _IO__D(102), _IO__D(103), _IO__D(104), 404 PORT_DATA_IO_PU(52), PORT_DATA_IO_PU(53),
459 _IO__D(105), _IO_U_(106), _IO_U_(107), _IO_U_(108), _IO_U_(109), 405 PORT_DATA_IO_PU(54), PORT_DATA_IO_PU(55),
460 406 PORT_DATA_IO_PU(56), PORT_DATA_IO_PU(57),
461 _IO_U_(110), _IO_U_(111), _IO__D(112), _IO__D(113), _IO_U_(114), 407 PORT_DATA_IO_PU(58), PORT_DATA_IO_PU(59),
462 _IO_U_(115), _IO_U_(116), _IO_U_(117), _IO_U_(118), _IO_U_(119), 408
463 409 PORT_DATA_IO_PU(60), PORT_DATA_IO_PU(61),
464 _IO_U_(120), _IO__D(121), _IO__D(122), _IO__D(123), _IO__D(124), 410 PORT_DATA_IO(62), PORT_DATA_O(63),
465 _IO__D(125), _IO__D(126), _IO__D(127), _IO__D(128), _IO_UD(129), 411 PORT_DATA_O(64), PORT_DATA_IO_PU(65),
466 412 PORT_DATA_O(66), PORT_DATA_IO_PU(67), /*66?*/
467 _IO_UD(130), _IO_UD(131), _IO_UD(132), _IO_UD(133), _IO_UD(134), 413 PORT_DATA_O(68), PORT_DATA_IO(69),
468 _IO_UD(135), _IO__D(136), _IO__D(137), _IO__D(138), _IO__D(139), 414
469 415 PORT_DATA_IO(70), PORT_DATA_IO(71),
470 _IO__D(140), _IO__D(141), _IO__D(142), _IO_UD(143), _IO__D(144), 416 PORT_DATA_O(72), PORT_DATA_I_PU(73),
471 _IO__D(145), _IO__D(146), _IO__D(147), _IO__D(148), _IO__D(149), 417 PORT_DATA_I_PU_PD(74), PORT_DATA_IO_PU_PD(75),
472 418 PORT_DATA_IO_PU_PD(76), PORT_DATA_IO_PU_PD(77),
473 _IO__D(150), _IO__D(151), _IO_UD(152), _I___D(153), _IO_UD(154), 419 PORT_DATA_IO_PU_PD(78), PORT_DATA_IO_PU_PD(79),
474 _I___D(155), _IO__D(156), _IO__D(157), _I___D(158), _IO__D(159), 420
475 421 PORT_DATA_IO_PU_PD(80), PORT_DATA_IO_PU_PD(81),
476 __O___(160), _IO__D(161), _IO__D(162), _IO__D(163), _I___D(164), 422 PORT_DATA_IO_PU_PD(82), PORT_DATA_IO_PU_PD(83),
477 _IO__D(165), _I___D(166), _I___D(167), _I___D(168), _I___D(169), 423 PORT_DATA_IO_PU_PD(84), PORT_DATA_IO_PU_PD(85),
478 424 PORT_DATA_IO_PU_PD(86), PORT_DATA_IO_PU_PD(87),
479 _I___D(170), __O___(171), _IO_UD(172), _IO_UD(173), _IO_UD(174), 425 PORT_DATA_IO_PU_PD(88), PORT_DATA_IO_PU_PD(89),
480 _IO_UD(175), _IO_UD(176), _IO_UD(177), _IO_UD(178), __O___(179), 426
481 427 PORT_DATA_IO_PU_PD(90), PORT_DATA_IO_PU_PD(91),
482 _IO_UD(180), _IO_UD(181), _IO_UD(182), _IO_UD(183), _IO_UD(184), 428 PORT_DATA_IO_PU_PD(92), PORT_DATA_IO_PU_PD(93),
483 __O___(185), _IO_UD(186), _IO_UD(187), _IO_UD(188), _IO_UD(189), 429 PORT_DATA_IO_PU_PD(94), PORT_DATA_IO_PU_PD(95),
484 430 PORT_DATA_IO_PU(96), PORT_DATA_IO_PU_PD(97),
485 _IO_UD(190), 431 PORT_DATA_IO_PU_PD(98), PORT_DATA_O(99), /*99?*/
432
433 PORT_DATA_IO_PD(100), PORT_DATA_IO_PD(101),
434 PORT_DATA_IO_PD(102), PORT_DATA_IO_PD(103),
435 PORT_DATA_IO_PD(104), PORT_DATA_IO_PD(105),
436 PORT_DATA_IO_PU(106), PORT_DATA_IO_PU(107),
437 PORT_DATA_IO_PU(108), PORT_DATA_IO_PU(109),
438
439 PORT_DATA_IO_PU(110), PORT_DATA_IO_PU(111),
440 PORT_DATA_IO_PD(112), PORT_DATA_IO_PD(113),
441 PORT_DATA_IO_PU(114), PORT_DATA_IO_PU(115),
442 PORT_DATA_IO_PU(116), PORT_DATA_IO_PU(117),
443 PORT_DATA_IO_PU(118), PORT_DATA_IO_PU(119),
444
445 PORT_DATA_IO_PU(120), PORT_DATA_IO_PD(121),
446 PORT_DATA_IO_PD(122), PORT_DATA_IO_PD(123),
447 PORT_DATA_IO_PD(124), PORT_DATA_IO_PD(125),
448 PORT_DATA_IO_PD(126), PORT_DATA_IO_PD(127),
449 PORT_DATA_IO_PD(128), PORT_DATA_IO_PU_PD(129),
450
451 PORT_DATA_IO_PU_PD(130), PORT_DATA_IO_PU_PD(131),
452 PORT_DATA_IO_PU_PD(132), PORT_DATA_IO_PU_PD(133),
453 PORT_DATA_IO_PU_PD(134), PORT_DATA_IO_PU_PD(135),
454 PORT_DATA_IO_PD(136), PORT_DATA_IO_PD(137),
455 PORT_DATA_IO_PD(138), PORT_DATA_IO_PD(139),
456
457 PORT_DATA_IO_PD(140), PORT_DATA_IO_PD(141),
458 PORT_DATA_IO_PD(142), PORT_DATA_IO_PU_PD(143),
459 PORT_DATA_IO_PD(144), PORT_DATA_IO_PD(145),
460 PORT_DATA_IO_PD(146), PORT_DATA_IO_PD(147),
461 PORT_DATA_IO_PD(148), PORT_DATA_IO_PD(149),
462
463 PORT_DATA_IO_PD(150), PORT_DATA_IO_PD(151),
464 PORT_DATA_IO_PU_PD(152), PORT_DATA_I_PD(153),
465 PORT_DATA_IO_PU_PD(154), PORT_DATA_I_PD(155),
466 PORT_DATA_IO_PD(156), PORT_DATA_IO_PD(157),
467 PORT_DATA_I_PD(158), PORT_DATA_IO_PD(159),
468
469 PORT_DATA_O(160), PORT_DATA_IO_PD(161),
470 PORT_DATA_IO_PD(162), PORT_DATA_IO_PD(163),
471 PORT_DATA_I_PD(164), PORT_DATA_IO_PD(165),
472 PORT_DATA_I_PD(166), PORT_DATA_I_PD(167),
473 PORT_DATA_I_PD(168), PORT_DATA_I_PD(169),
474
475 PORT_DATA_I_PD(170), PORT_DATA_O(171),
476 PORT_DATA_IO_PU_PD(172), PORT_DATA_IO_PU_PD(173),
477 PORT_DATA_IO_PU_PD(174), PORT_DATA_IO_PU_PD(175),
478 PORT_DATA_IO_PU_PD(176), PORT_DATA_IO_PU_PD(177),
479 PORT_DATA_IO_PU_PD(178), PORT_DATA_O(179),
480
481 PORT_DATA_IO_PU_PD(180), PORT_DATA_IO_PU_PD(181),
482 PORT_DATA_IO_PU_PD(182), PORT_DATA_IO_PU_PD(183),
483 PORT_DATA_IO_PU_PD(184), PORT_DATA_O(185),
484 PORT_DATA_IO_PU_PD(186), PORT_DATA_IO_PU_PD(187),
485 PORT_DATA_IO_PU_PD(188), PORT_DATA_IO_PU_PD(189),
486
487 PORT_DATA_IO_PU_PD(190),
486 488
487 /* IRQ */ 489 /* IRQ */
488 PINMUX_DATA(IRQ0_6_MARK, PORT6_FN0, MSEL1CR_0_0), 490 PINMUX_DATA(IRQ0_6_MARK, PORT6_FN0, MSEL1CR_0_0),
@@ -926,10 +928,6 @@ static pinmux_enum_t pinmux_data[] = {
926 PINMUX_DATA(MFIv4_MARK, MSEL4CR_6_1), 928 PINMUX_DATA(MFIv4_MARK, MSEL4CR_6_1),
927}; 929};
928 930
929#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
930#define GPIO_PORT_ALL() _190(_GPIO_PORT, , unused)
931#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
932
933static struct pinmux_gpio pinmux_gpios[] = { 931static struct pinmux_gpio pinmux_gpios[] = {
934 932
935 /* PORT */ 933 /* PORT */
@@ -1201,22 +1199,6 @@ static struct pinmux_gpio pinmux_gpios[] = {
1201 GPIO_FN(SDENC_DV_CLKI), 1199 GPIO_FN(SDENC_DV_CLKI),
1202}; 1200};
1203 1201
1204/* helper for top 4 bits in PORTnCR */
1205#define PCRH(in, in_pd, in_pu, out) \
1206 0, (out), (in), 0, \
1207 0, 0, 0, 0, \
1208 0, 0, (in_pd), 0, \
1209 0, 0, (in_pu), 0
1210
1211#define PORTCR(nr, reg) \
1212 { PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
1213 PCRH(PORT##nr##_IN, PORT##nr##_IN_PD, \
1214 PORT##nr##_IN_PU, PORT##nr##_OUT), \
1215 PORT##nr##_FN0, PORT##nr##_FN1, PORT##nr##_FN2, \
1216 PORT##nr##_FN3, PORT##nr##_FN4, PORT##nr##_FN5, \
1217 PORT##nr##_FN6, PORT##nr##_FN7 } \
1218 }
1219
1220static struct pinmux_cfg_reg pinmux_config_regs[] = { 1202static struct pinmux_cfg_reg pinmux_config_regs[] = {
1221 PORTCR(0, 0xE6051000), /* PORT0CR */ 1203 PORTCR(0, 0xE6051000), /* PORT0CR */
1222 PORTCR(1, 0xE6051001), /* PORT1CR */ 1204 PORTCR(1, 0xE6051001), /* PORT1CR */
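
The pfc-sh7372.c hunk above also swaps the terse _I___D/_IO_UD shorthands for the self-describing PORT_DATA_*() names and deletes the local copies of those macros, so every PFC driver now shares one definition. Their shape, as visible in the lines removed above, is roughly:

    /* Reconstructed from the removed definitions above (illustration):
     * each helper lists the pin states that are legal while port <nr>
     * is in GPIO mode (function 0). */
    #define PORT_DATA_IO_PU_PD(nr)					\
    	PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0,			\
    		    PORT##nr##_OUT, PORT##nr##_IN,			\
    		    PORT##nr##_IN_PD, PORT##nr##_IN_PU)

    /* So the table entry
     *	PORT_DATA_IO_PU_PD(6),
     * expands to
     *	PINMUX_DATA(PORT6_DATA, PORT6_FN0, PORT6_OUT, PORT6_IN,
     *		    PORT6_IN_PD, PORT6_IN_PU),
     * i.e. port 6 may drive out, or be an input with pull-down or
     * pull-up bias; the I/O/PD/PU letters in each name encode which
     * of those states the port supports. */
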
diff --git a/arch/arm/mach-shmobile/pfc-sh7377.c b/arch/arm/mach-shmobile/pfc-sh7377.c
index 613e6842ad05..2f10511946ad 100644
--- a/arch/arm/mach-shmobile/pfc-sh7377.c
+++ b/arch/arm/mach-shmobile/pfc-sh7377.c
@@ -22,84 +22,65 @@
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <mach/sh7377.h> 23#include <mach/sh7377.h>
24 24
25#define _1(fn, pfx, sfx) fn(pfx, sfx) 25#define CPU_ALL_PORT(fn, pfx, sfx) \
26 26 PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
27#define _10(fn, pfx, sfx) \ 27 PORT_10(fn, pfx##10, sfx), \
28 _1(fn, pfx##0, sfx), _1(fn, pfx##1, sfx), \ 28 PORT_1(fn, pfx##110, sfx), PORT_1(fn, pfx##111, sfx), \
29 _1(fn, pfx##2, sfx), _1(fn, pfx##3, sfx), \ 29 PORT_1(fn, pfx##112, sfx), PORT_1(fn, pfx##113, sfx), \
30 _1(fn, pfx##4, sfx), _1(fn, pfx##5, sfx), \ 30 PORT_1(fn, pfx##114, sfx), PORT_1(fn, pfx##115, sfx), \
31 _1(fn, pfx##6, sfx), _1(fn, pfx##7, sfx), \ 31 PORT_1(fn, pfx##116, sfx), PORT_1(fn, pfx##117, sfx), \
32 _1(fn, pfx##8, sfx), _1(fn, pfx##9, sfx) 32 PORT_1(fn, pfx##118, sfx), \
33 33 PORT_1(fn, pfx##128, sfx), PORT_1(fn, pfx##129, sfx), \
34#define _90(fn, pfx, sfx) \ 34 PORT_10(fn, pfx##13, sfx), PORT_10(fn, pfx##14, sfx), \
35 _10(fn, pfx##1, sfx), _10(fn, pfx##2, sfx), \ 35 PORT_10(fn, pfx##15, sfx), \
36 _10(fn, pfx##3, sfx), _10(fn, pfx##4, sfx), \ 36 PORT_1(fn, pfx##160, sfx), PORT_1(fn, pfx##161, sfx), \
37 _10(fn, pfx##5, sfx), _10(fn, pfx##6, sfx), \ 37 PORT_1(fn, pfx##162, sfx), PORT_1(fn, pfx##163, sfx), \
38 _10(fn, pfx##7, sfx), _10(fn, pfx##8, sfx), \ 38 PORT_1(fn, pfx##164, sfx), \
39 _10(fn, pfx##9, sfx) 39 PORT_1(fn, pfx##192, sfx), PORT_1(fn, pfx##193, sfx), \
40 40 PORT_1(fn, pfx##194, sfx), PORT_1(fn, pfx##195, sfx), \
41#define _265(fn, pfx, sfx) \ 41 PORT_1(fn, pfx##196, sfx), PORT_1(fn, pfx##197, sfx), \
42 _10(fn, pfx, sfx), _90(fn, pfx, sfx), \ 42 PORT_1(fn, pfx##198, sfx), PORT_1(fn, pfx##199, sfx), \
43 _10(fn, pfx##10, sfx), \ 43 PORT_10(fn, pfx##20, sfx), PORT_10(fn, pfx##21, sfx), \
44 _1(fn, pfx##110, sfx), _1(fn, pfx##111, sfx), \ 44 PORT_10(fn, pfx##22, sfx), PORT_10(fn, pfx##23, sfx), \
45 _1(fn, pfx##112, sfx), _1(fn, pfx##113, sfx), \ 45 PORT_10(fn, pfx##24, sfx), PORT_10(fn, pfx##25, sfx), \
46 _1(fn, pfx##114, sfx), _1(fn, pfx##115, sfx), \ 46 PORT_1(fn, pfx##260, sfx), PORT_1(fn, pfx##261, sfx), \
47 _1(fn, pfx##116, sfx), _1(fn, pfx##117, sfx), \ 47 PORT_1(fn, pfx##262, sfx), PORT_1(fn, pfx##263, sfx), \
48 _1(fn, pfx##118, sfx), \ 48 PORT_1(fn, pfx##264, sfx)
49 _1(fn, pfx##128, sfx), _1(fn, pfx##129, sfx), \
50 _10(fn, pfx##13, sfx), _10(fn, pfx##14, sfx), \
51 _10(fn, pfx##15, sfx), \
52 _1(fn, pfx##160, sfx), _1(fn, pfx##161, sfx), \
53 _1(fn, pfx##162, sfx), _1(fn, pfx##163, sfx), \
54 _1(fn, pfx##164, sfx), \
55 _1(fn, pfx##192, sfx), _1(fn, pfx##193, sfx), \
56 _1(fn, pfx##194, sfx), _1(fn, pfx##195, sfx), \
57 _1(fn, pfx##196, sfx), _1(fn, pfx##197, sfx), \
58 _1(fn, pfx##198, sfx), _1(fn, pfx##199, sfx), \
59 _10(fn, pfx##20, sfx), _10(fn, pfx##21, sfx), \
60 _10(fn, pfx##22, sfx), _10(fn, pfx##23, sfx), \
61 _10(fn, pfx##24, sfx), _10(fn, pfx##25, sfx), \
62 _1(fn, pfx##260, sfx), _1(fn, pfx##261, sfx), \
63 _1(fn, pfx##262, sfx), _1(fn, pfx##263, sfx), \
64 _1(fn, pfx##264, sfx)
65
66#define _PORT(pfx, sfx) pfx##_##sfx
67#define PORT_265(str) _265(_PORT, PORT, str)
68 49
69enum { 50enum {
70 PINMUX_RESERVED = 0, 51 PINMUX_RESERVED = 0,
71 52
72 PINMUX_DATA_BEGIN, 53 PINMUX_DATA_BEGIN,
73 PORT_265(DATA), /* PORT0_DATA -> PORT264_DATA */ 54 PORT_ALL(DATA), /* PORT0_DATA -> PORT264_DATA */
74 PINMUX_DATA_END, 55 PINMUX_DATA_END,
75 56
76 PINMUX_INPUT_BEGIN, 57 PINMUX_INPUT_BEGIN,
77 PORT_265(IN), /* PORT0_IN -> PORT264_IN */ 58 PORT_ALL(IN), /* PORT0_IN -> PORT264_IN */
78 PINMUX_INPUT_END, 59 PINMUX_INPUT_END,
79 60
80 PINMUX_INPUT_PULLUP_BEGIN, 61 PINMUX_INPUT_PULLUP_BEGIN,
81 PORT_265(IN_PU), /* PORT0_IN_PU -> PORT264_IN_PU */ 62 PORT_ALL(IN_PU), /* PORT0_IN_PU -> PORT264_IN_PU */
82 PINMUX_INPUT_PULLUP_END, 63 PINMUX_INPUT_PULLUP_END,
83 64
84 PINMUX_INPUT_PULLDOWN_BEGIN, 65 PINMUX_INPUT_PULLDOWN_BEGIN,
85 PORT_265(IN_PD), /* PORT0_IN_PD -> PORT264_IN_PD */ 66 PORT_ALL(IN_PD), /* PORT0_IN_PD -> PORT264_IN_PD */
86 PINMUX_INPUT_PULLDOWN_END, 67 PINMUX_INPUT_PULLDOWN_END,
87 68
88 PINMUX_OUTPUT_BEGIN, 69 PINMUX_OUTPUT_BEGIN,
89 PORT_265(OUT), /* PORT0_OUT -> PORT264_OUT */ 70 PORT_ALL(OUT), /* PORT0_OUT -> PORT264_OUT */
90 PINMUX_OUTPUT_END, 71 PINMUX_OUTPUT_END,
91 72
92 PINMUX_FUNCTION_BEGIN, 73 PINMUX_FUNCTION_BEGIN,
93 PORT_265(FN_IN), /* PORT0_FN_IN -> PORT264_FN_IN */ 74 PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT264_FN_IN */
94 PORT_265(FN_OUT), /* PORT0_FN_OUT -> PORT264_FN_OUT */ 75 PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT264_FN_OUT */
95 PORT_265(FN0), /* PORT0_FN0 -> PORT264_FN0 */ 76 PORT_ALL(FN0), /* PORT0_FN0 -> PORT264_FN0 */
96 PORT_265(FN1), /* PORT0_FN1 -> PORT264_FN1 */ 77 PORT_ALL(FN1), /* PORT0_FN1 -> PORT264_FN1 */
97 PORT_265(FN2), /* PORT0_FN2 -> PORT264_FN2 */ 78 PORT_ALL(FN2), /* PORT0_FN2 -> PORT264_FN2 */
98 PORT_265(FN3), /* PORT0_FN3 -> PORT264_FN3 */ 79 PORT_ALL(FN3), /* PORT0_FN3 -> PORT264_FN3 */
99 PORT_265(FN4), /* PORT0_FN4 -> PORT264_FN4 */ 80 PORT_ALL(FN4), /* PORT0_FN4 -> PORT264_FN4 */
100 PORT_265(FN5), /* PORT0_FN5 -> PORT264_FN5 */ 81 PORT_ALL(FN5), /* PORT0_FN5 -> PORT264_FN5 */
101 PORT_265(FN6), /* PORT0_FN6 -> PORT264_FN6 */ 82 PORT_ALL(FN6), /* PORT0_FN6 -> PORT264_FN6 */
102 PORT_265(FN7), /* PORT0_FN7 -> PORT264_FN7 */ 83 PORT_ALL(FN7), /* PORT0_FN7 -> PORT264_FN7 */
103 84
104 MSELBCR_MSEL17_1, MSELBCR_MSEL17_0, 85 MSELBCR_MSEL17_1, MSELBCR_MSEL17_0,
105 MSELBCR_MSEL16_1, MSELBCR_MSEL16_0, 86 MSELBCR_MSEL16_1, MSELBCR_MSEL16_0,
@@ -360,45 +341,6 @@ enum {
360 PINMUX_MARK_END, 341 PINMUX_MARK_END,
361}; 342};
362 343
363#define PORT_DATA_I(nr) \
364 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN)
365
366#define PORT_DATA_I_PD(nr) \
367 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
368 PORT##nr##_IN, PORT##nr##_IN_PD)
369
370#define PORT_DATA_I_PU(nr) \
371 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
372 PORT##nr##_IN, PORT##nr##_IN_PU)
373
374#define PORT_DATA_I_PU_PD(nr) \
375 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
376 PORT##nr##_IN, PORT##nr##_IN_PD, \
377 PORT##nr##_IN_PU)
378
379#define PORT_DATA_O(nr) \
380 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
381 PORT##nr##_OUT)
382
383#define PORT_DATA_IO(nr) \
384 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
385 PORT##nr##_OUT, PORT##nr##_IN)
386
387#define PORT_DATA_IO_PD(nr) \
388 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
389 PORT##nr##_OUT, PORT##nr##_IN, \
390 PORT##nr##_IN_PD)
391
392#define PORT_DATA_IO_PU(nr) \
393 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
394 PORT##nr##_OUT, PORT##nr##_IN, \
395 PORT##nr##_IN_PU)
396
397#define PORT_DATA_IO_PU_PD(nr) \
398 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
399 PORT##nr##_OUT, PORT##nr##_IN, \
400 PORT##nr##_IN_PD, PORT##nr##_IN_PU)
401
402static pinmux_enum_t pinmux_data[] = { 344static pinmux_enum_t pinmux_data[] = {
403 /* specify valid pin states for each pin in GPIO mode */ 345 /* specify valid pin states for each pin in GPIO mode */
404 /* 55-1 (GPIO) */ 346 /* 55-1 (GPIO) */
@@ -1078,13 +1020,9 @@ static pinmux_enum_t pinmux_data[] = {
1078 PINMUX_DATA(RESETOUTS_MARK, PORT264_FN1), 1020 PINMUX_DATA(RESETOUTS_MARK, PORT264_FN1),
1079}; 1021};
1080 1022
1081#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
1082#define GPIO_PORT_265() _265(_GPIO_PORT, , unused)
1083#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
1084
1085static struct pinmux_gpio pinmux_gpios[] = { 1023static struct pinmux_gpio pinmux_gpios[] = {
1086 /* 55-1 -> 55-5 (GPIO) */ 1024 /* 55-1 -> 55-5 (GPIO) */
1087 GPIO_PORT_265(), 1025 GPIO_PORT_ALL(),
1088 1026
1089 /* Special Pull-up / Pull-down Functions */ 1027 /* Special Pull-up / Pull-down Functions */
1090 GPIO_FN(PORT66_KEYIN0_PU), GPIO_FN(PORT67_KEYIN1_PU), 1028 GPIO_FN(PORT66_KEYIN0_PU), GPIO_FN(PORT67_KEYIN1_PU),
@@ -1362,23 +1300,6 @@ static struct pinmux_gpio pinmux_gpios[] = {
1362 GPIO_FN(RESETOUTS), 1300 GPIO_FN(RESETOUTS),
1363}; 1301};
1364 1302
1365/* helper for top 4 bits in PORTnCR */
1366#define PCRH(in, in_pd, in_pu, out) \
1367 0, (out), (in), 0, \
1368 0, 0, 0, 0, \
1369 0, 0, (in_pd), 0, \
1370 0, 0, (in_pu), 0
1371
1372#define PORTCR(nr, reg) \
1373 { PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
1374 PCRH(PORT##nr##_IN, PORT##nr##_IN_PD, \
1375 PORT##nr##_IN_PU, PORT##nr##_OUT), \
1376 PORT##nr##_FN0, PORT##nr##_FN1, \
1377 PORT##nr##_FN2, PORT##nr##_FN3, \
1378 PORT##nr##_FN4, PORT##nr##_FN5, \
1379 PORT##nr##_FN6, PORT##nr##_FN7 } \
1380 }
1381
1382static struct pinmux_cfg_reg pinmux_config_regs[] = { 1303static struct pinmux_cfg_reg pinmux_config_regs[] = {
1383 PORTCR(0, 0xe6050000), /* PORT0CR */ 1304 PORTCR(0, 0xe6050000), /* PORT0CR */
1384 PORTCR(1, 0xe6050001), /* PORT1CR */ 1305 PORTCR(1, 0xe6050001), /* PORT1CR */
diff --git a/arch/arm/mach-shmobile/pfc-sh73a0.c b/arch/arm/mach-shmobile/pfc-sh73a0.c
index 5abe02fbd6b9..e05634ce2e0d 100644
--- a/arch/arm/mach-shmobile/pfc-sh73a0.c
+++ b/arch/arm/mach-shmobile/pfc-sh73a0.c
@@ -24,83 +24,71 @@
24#include <mach/sh73a0.h> 24#include <mach/sh73a0.h>
25#include <mach/irqs.h> 25#include <mach/irqs.h>
26 26
27#define _1(fn, pfx, sfx) fn(pfx, sfx) 27#define CPU_ALL_PORT(fn, pfx, sfx) \
28 28 PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx), \
29#define _10(fn, pfx, sfx) \ 29 PORT_10(fn, pfx##2, sfx), PORT_10(fn, pfx##3, sfx), \
30 _1(fn, pfx##0, sfx), _1(fn, pfx##1, sfx), \ 30 PORT_10(fn, pfx##4, sfx), PORT_10(fn, pfx##5, sfx), \
31 _1(fn, pfx##2, sfx), _1(fn, pfx##3, sfx), \ 31 PORT_10(fn, pfx##6, sfx), PORT_10(fn, pfx##7, sfx), \
32 _1(fn, pfx##4, sfx), _1(fn, pfx##5, sfx), \ 32 PORT_10(fn, pfx##8, sfx), PORT_10(fn, pfx##9, sfx), \
33 _1(fn, pfx##6, sfx), _1(fn, pfx##7, sfx), \ 33 PORT_10(fn, pfx##10, sfx), \
34 _1(fn, pfx##8, sfx), _1(fn, pfx##9, sfx) 34 PORT_1(fn, pfx##110, sfx), PORT_1(fn, pfx##111, sfx), \
35 35 PORT_1(fn, pfx##112, sfx), PORT_1(fn, pfx##113, sfx), \
36#define _310(fn, pfx, sfx) \ 36 PORT_1(fn, pfx##114, sfx), PORT_1(fn, pfx##115, sfx), \
37 _10(fn, pfx, sfx), _10(fn, pfx##1, sfx), \ 37 PORT_1(fn, pfx##116, sfx), PORT_1(fn, pfx##117, sfx), \
38 _10(fn, pfx##2, sfx), _10(fn, pfx##3, sfx), \ 38 PORT_1(fn, pfx##118, sfx), \
39 _10(fn, pfx##4, sfx), _10(fn, pfx##5, sfx), \ 39 PORT_1(fn, pfx##128, sfx), PORT_1(fn, pfx##129, sfx), \
40 _10(fn, pfx##6, sfx), _10(fn, pfx##7, sfx), \ 40 PORT_10(fn, pfx##13, sfx), PORT_10(fn, pfx##14, sfx), \
41 _10(fn, pfx##8, sfx), _10(fn, pfx##9, sfx), \ 41 PORT_10(fn, pfx##15, sfx), \
42 _10(fn, pfx##10, sfx), \ 42 PORT_1(fn, pfx##160, sfx), PORT_1(fn, pfx##161, sfx), \
43 _1(fn, pfx##110, sfx), _1(fn, pfx##111, sfx), \ 43 PORT_1(fn, pfx##162, sfx), PORT_1(fn, pfx##163, sfx), \
44 _1(fn, pfx##112, sfx), _1(fn, pfx##113, sfx), \ 44 PORT_1(fn, pfx##164, sfx), \
45 _1(fn, pfx##114, sfx), _1(fn, pfx##115, sfx), \ 45 PORT_1(fn, pfx##192, sfx), PORT_1(fn, pfx##193, sfx), \
46 _1(fn, pfx##116, sfx), _1(fn, pfx##117, sfx), \ 46 PORT_1(fn, pfx##194, sfx), PORT_1(fn, pfx##195, sfx), \
47 _1(fn, pfx##118, sfx), \ 47 PORT_1(fn, pfx##196, sfx), PORT_1(fn, pfx##197, sfx), \
48 _1(fn, pfx##128, sfx), _1(fn, pfx##129, sfx), \ 48 PORT_1(fn, pfx##198, sfx), PORT_1(fn, pfx##199, sfx), \
49 _10(fn, pfx##13, sfx), _10(fn, pfx##14, sfx), \ 49 PORT_10(fn, pfx##20, sfx), PORT_10(fn, pfx##21, sfx), \
50 _10(fn, pfx##15, sfx), \ 50 PORT_10(fn, pfx##22, sfx), PORT_10(fn, pfx##23, sfx), \
51 _1(fn, pfx##160, sfx), _1(fn, pfx##161, sfx), \ 51 PORT_10(fn, pfx##24, sfx), PORT_10(fn, pfx##25, sfx), \
52 _1(fn, pfx##162, sfx), _1(fn, pfx##163, sfx), \ 52 PORT_10(fn, pfx##26, sfx), PORT_10(fn, pfx##27, sfx), \
53 _1(fn, pfx##164, sfx), \ 53 PORT_1(fn, pfx##280, sfx), PORT_1(fn, pfx##281, sfx), \
54 _1(fn, pfx##192, sfx), _1(fn, pfx##193, sfx), \ 54 PORT_1(fn, pfx##282, sfx), \
55 _1(fn, pfx##194, sfx), _1(fn, pfx##195, sfx), \ 55 PORT_1(fn, pfx##288, sfx), PORT_1(fn, pfx##289, sfx), \
56 _1(fn, pfx##196, sfx), _1(fn, pfx##197, sfx), \ 56 PORT_10(fn, pfx##29, sfx), PORT_10(fn, pfx##30, sfx)
57 _1(fn, pfx##198, sfx), _1(fn, pfx##199, sfx), \
58 _10(fn, pfx##20, sfx), _10(fn, pfx##21, sfx), \
59 _10(fn, pfx##22, sfx), _10(fn, pfx##23, sfx), \
60 _10(fn, pfx##24, sfx), _10(fn, pfx##25, sfx), \
61 _10(fn, pfx##26, sfx), _10(fn, pfx##27, sfx), \
62 _1(fn, pfx##280, sfx), _1(fn, pfx##281, sfx), \
63 _1(fn, pfx##282, sfx), \
64 _1(fn, pfx##288, sfx), _1(fn, pfx##289, sfx), \
65 _10(fn, pfx##29, sfx), _10(fn, pfx##30, sfx)
66
67#define _PORT(pfx, sfx) pfx##_##sfx
68#define PORT_310(str) _310(_PORT, PORT, str)
69 57
70enum { 58enum {
71 PINMUX_RESERVED = 0, 59 PINMUX_RESERVED = 0,
72 60
73 PINMUX_DATA_BEGIN, 61 PINMUX_DATA_BEGIN,
74 PORT_310(DATA), /* PORT0_DATA -> PORT309_DATA */ 62 PORT_ALL(DATA), /* PORT0_DATA -> PORT309_DATA */
75 PINMUX_DATA_END, 63 PINMUX_DATA_END,
76 64
77 PINMUX_INPUT_BEGIN, 65 PINMUX_INPUT_BEGIN,
78 PORT_310(IN), /* PORT0_IN -> PORT309_IN */ 66 PORT_ALL(IN), /* PORT0_IN -> PORT309_IN */
79 PINMUX_INPUT_END, 67 PINMUX_INPUT_END,
80 68
81 PINMUX_INPUT_PULLUP_BEGIN, 69 PINMUX_INPUT_PULLUP_BEGIN,
82 PORT_310(IN_PU), /* PORT0_IN_PU -> PORT309_IN_PU */ 70 PORT_ALL(IN_PU), /* PORT0_IN_PU -> PORT309_IN_PU */
83 PINMUX_INPUT_PULLUP_END, 71 PINMUX_INPUT_PULLUP_END,
84 72
85 PINMUX_INPUT_PULLDOWN_BEGIN, 73 PINMUX_INPUT_PULLDOWN_BEGIN,
86 PORT_310(IN_PD), /* PORT0_IN_PD -> PORT309_IN_PD */ 74 PORT_ALL(IN_PD), /* PORT0_IN_PD -> PORT309_IN_PD */
87 PINMUX_INPUT_PULLDOWN_END, 75 PINMUX_INPUT_PULLDOWN_END,
88 76
89 PINMUX_OUTPUT_BEGIN, 77 PINMUX_OUTPUT_BEGIN,
90 PORT_310(OUT), /* PORT0_OUT -> PORT309_OUT */ 78 PORT_ALL(OUT), /* PORT0_OUT -> PORT309_OUT */
91 PINMUX_OUTPUT_END, 79 PINMUX_OUTPUT_END,
92 80
93 PINMUX_FUNCTION_BEGIN, 81 PINMUX_FUNCTION_BEGIN,
94 PORT_310(FN_IN), /* PORT0_FN_IN -> PORT309_FN_IN */ 82 PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT309_FN_IN */
95 PORT_310(FN_OUT), /* PORT0_FN_OUT -> PORT309_FN_OUT */ 83 PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT309_FN_OUT */
96 PORT_310(FN0), /* PORT0_FN0 -> PORT309_FN0 */ 84 PORT_ALL(FN0), /* PORT0_FN0 -> PORT309_FN0 */
97 PORT_310(FN1), /* PORT0_FN1 -> PORT309_FN1 */ 85 PORT_ALL(FN1), /* PORT0_FN1 -> PORT309_FN1 */
98 PORT_310(FN2), /* PORT0_FN2 -> PORT309_FN2 */ 86 PORT_ALL(FN2), /* PORT0_FN2 -> PORT309_FN2 */
99 PORT_310(FN3), /* PORT0_FN3 -> PORT309_FN3 */ 87 PORT_ALL(FN3), /* PORT0_FN3 -> PORT309_FN3 */
100 PORT_310(FN4), /* PORT0_FN4 -> PORT309_FN4 */ 88 PORT_ALL(FN4), /* PORT0_FN4 -> PORT309_FN4 */
101 PORT_310(FN5), /* PORT0_FN5 -> PORT309_FN5 */ 89 PORT_ALL(FN5), /* PORT0_FN5 -> PORT309_FN5 */
102 PORT_310(FN6), /* PORT0_FN6 -> PORT309_FN6 */ 90 PORT_ALL(FN6), /* PORT0_FN6 -> PORT309_FN6 */
103 PORT_310(FN7), /* PORT0_FN7 -> PORT309_FN7 */ 91 PORT_ALL(FN7), /* PORT0_FN7 -> PORT309_FN7 */
104 92
105 MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1, 93 MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1,
106 MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1, 94 MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1,
@@ -508,6 +496,14 @@ enum {
508 SDHICMD2_PU_MARK, 496 SDHICMD2_PU_MARK,
509 MMCCMD0_PU_MARK, 497 MMCCMD0_PU_MARK,
510 MMCCMD1_PU_MARK, 498 MMCCMD1_PU_MARK,
499 MMCD0_0_PU_MARK,
500 MMCD0_1_PU_MARK,
501 MMCD0_2_PU_MARK,
502 MMCD0_3_PU_MARK,
503 MMCD0_4_PU_MARK,
504 MMCD0_5_PU_MARK,
505 MMCD0_6_PU_MARK,
506 MMCD0_7_PU_MARK,
511 FSIBISLD_PU_MARK, 507 FSIBISLD_PU_MARK,
512 FSIACK_PU_MARK, 508 FSIACK_PU_MARK,
513 FSIAILR_PU_MARK, 509 FSIAILR_PU_MARK,
@@ -517,45 +513,6 @@ enum {
517 PINMUX_MARK_END, 513 PINMUX_MARK_END,
518}; 514};
519 515
520#define PORT_DATA_I(nr) \
521 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN)
522
523#define PORT_DATA_I_PD(nr) \
524 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
525 PORT##nr##_IN, PORT##nr##_IN_PD)
526
527#define PORT_DATA_I_PU(nr) \
528 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
529 PORT##nr##_IN, PORT##nr##_IN_PU)
530
531#define PORT_DATA_I_PU_PD(nr) \
532 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
533 PORT##nr##_IN, PORT##nr##_IN_PD, \
534 PORT##nr##_IN_PU)
535
536#define PORT_DATA_O(nr) \
537 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
538 PORT##nr##_OUT)
539
540#define PORT_DATA_IO(nr) \
541 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
542 PORT##nr##_OUT, PORT##nr##_IN)
543
544#define PORT_DATA_IO_PD(nr) \
545 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
546 PORT##nr##_OUT, PORT##nr##_IN, \
547 PORT##nr##_IN_PD)
548
549#define PORT_DATA_IO_PU(nr) \
550 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
551 PORT##nr##_OUT, PORT##nr##_IN, \
552 PORT##nr##_IN_PU)
553
554#define PORT_DATA_IO_PU_PD(nr) \
555 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
556 PORT##nr##_OUT, PORT##nr##_IN, \
557 PORT##nr##_IN_PD, PORT##nr##_IN_PU)
558
559static pinmux_enum_t pinmux_data[] = { 516static pinmux_enum_t pinmux_data[] = {
560 /* specify valid pin states for each pin in GPIO mode */ 517 /* specify valid pin states for each pin in GPIO mode */
561 518
@@ -1561,6 +1518,24 @@ static pinmux_enum_t pinmux_data[] = {
1561 MSEL4CR_MSEL15_0), 1518 MSEL4CR_MSEL15_0),
1562 PINMUX_DATA(MMCCMD1_PU_MARK, PORT297_FN2, PORT297_IN_PU, 1519 PINMUX_DATA(MMCCMD1_PU_MARK, PORT297_FN2, PORT297_IN_PU,
1563 MSEL4CR_MSEL15_1), 1520 MSEL4CR_MSEL15_1),
1521
1522 PINMUX_DATA(MMCD0_0_PU_MARK,
1523 PORT271_FN1, PORT271_IN_PU, MSEL4CR_MSEL15_0),
1524 PINMUX_DATA(MMCD0_1_PU_MARK,
1525 PORT272_FN1, PORT272_IN_PU, MSEL4CR_MSEL15_0),
1526 PINMUX_DATA(MMCD0_2_PU_MARK,
1527 PORT273_FN1, PORT273_IN_PU, MSEL4CR_MSEL15_0),
1528 PINMUX_DATA(MMCD0_3_PU_MARK,
1529 PORT274_FN1, PORT274_IN_PU, MSEL4CR_MSEL15_0),
1530 PINMUX_DATA(MMCD0_4_PU_MARK,
1531 PORT275_FN1, PORT275_IN_PU, MSEL4CR_MSEL15_0),
1532 PINMUX_DATA(MMCD0_5_PU_MARK,
1533 PORT276_FN1, PORT276_IN_PU, MSEL4CR_MSEL15_0),
1534 PINMUX_DATA(MMCD0_6_PU_MARK,
1535 PORT277_FN1, PORT277_IN_PU, MSEL4CR_MSEL15_0),
1536 PINMUX_DATA(MMCD0_7_PU_MARK,
1537 PORT278_FN1, PORT278_IN_PU, MSEL4CR_MSEL15_0),
1538
1564 PINMUX_DATA(FSIBISLD_PU_MARK, PORT39_FN1, PORT39_IN_PU), 1539 PINMUX_DATA(FSIBISLD_PU_MARK, PORT39_FN1, PORT39_IN_PU),
1565 PINMUX_DATA(FSIACK_PU_MARK, PORT49_FN1, PORT49_IN_PU), 1540 PINMUX_DATA(FSIACK_PU_MARK, PORT49_FN1, PORT49_IN_PU),
1566 PINMUX_DATA(FSIAILR_PU_MARK, PORT50_FN5, PORT50_IN_PU), 1541 PINMUX_DATA(FSIAILR_PU_MARK, PORT50_FN5, PORT50_IN_PU),
@@ -1568,12 +1543,8 @@ static pinmux_enum_t pinmux_data[] = {
1568 PINMUX_DATA(FSIAISLD_PU_MARK, PORT55_FN1, PORT55_IN_PU), 1543 PINMUX_DATA(FSIAISLD_PU_MARK, PORT55_FN1, PORT55_IN_PU),
1569}; 1544};
1570 1545
1571#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
1572#define GPIO_PORT_310() _310(_GPIO_PORT, , unused)
1573#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
1574
1575static struct pinmux_gpio pinmux_gpios[] = { 1546static struct pinmux_gpio pinmux_gpios[] = {
1576 GPIO_PORT_310(), 1547 GPIO_PORT_ALL(),
1577 1548
1578 /* Table 25-1 (Functions 0-7) */ 1549 /* Table 25-1 (Functions 0-7) */
1579 GPIO_FN(VBUS_0), 1550 GPIO_FN(VBUS_0),
@@ -2236,24 +2207,20 @@ static struct pinmux_gpio pinmux_gpios[] = {
2236 GPIO_FN(SDHICMD2_PU), 2207 GPIO_FN(SDHICMD2_PU),
2237 GPIO_FN(MMCCMD0_PU), 2208 GPIO_FN(MMCCMD0_PU),
2238 GPIO_FN(MMCCMD1_PU), 2209 GPIO_FN(MMCCMD1_PU),
2210 GPIO_FN(MMCD0_0_PU),
2211 GPIO_FN(MMCD0_1_PU),
2212 GPIO_FN(MMCD0_2_PU),
2213 GPIO_FN(MMCD0_3_PU),
2214 GPIO_FN(MMCD0_4_PU),
2215 GPIO_FN(MMCD0_5_PU),
2216 GPIO_FN(MMCD0_6_PU),
2217 GPIO_FN(MMCD0_7_PU),
2239 GPIO_FN(FSIACK_PU), 2218 GPIO_FN(FSIACK_PU),
2240 GPIO_FN(FSIAILR_PU), 2219 GPIO_FN(FSIAILR_PU),
2241 GPIO_FN(FSIAIBT_PU), 2220 GPIO_FN(FSIAIBT_PU),
2242 GPIO_FN(FSIAISLD_PU), 2221 GPIO_FN(FSIAISLD_PU),
2243}; 2222};
2244 2223
2245#define PORTCR(nr, reg) \
2246 { PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
2247 0, \
2248 /*0001*/ PORT##nr##_OUT , \
2249 /*0010*/ PORT##nr##_IN , 0, 0, 0, 0, 0, 0, 0, \
2250 /*1010*/ PORT##nr##_IN_PD, 0, 0, 0, \
2251 /*1110*/ PORT##nr##_IN_PU, 0, \
2252 PORT##nr##_FN0, PORT##nr##_FN1, PORT##nr##_FN2, \
2253 PORT##nr##_FN3, PORT##nr##_FN4, PORT##nr##_FN5, \
2254 PORT##nr##_FN6, PORT##nr##_FN7, 0, 0, 0, 0, 0, 0, 0, 0 } \
2255 }
2256
2257static struct pinmux_cfg_reg pinmux_config_regs[] = { 2224static struct pinmux_cfg_reg pinmux_config_regs[] = {
2258 PORTCR(0, 0xe6050000), /* PORT0CR */ 2225 PORTCR(0, 0xe6050000), /* PORT0CR */
2259 PORTCR(1, 0xe6050001), /* PORT1CR */ 2226 PORTCR(1, 0xe6050001), /* PORT1CR */
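
The sh73a0 hunks above add pull-up variants for the eight MMCIF data lines (MMCD0_0_PU .. MMCD0_7_PU): new GPIO function enums, PINMUX_DATA() descriptions and GPIO_FN() entries. This is what lets board-ag5evm.c, earlier in this diff, request the *_PU names instead of the plain ones. Reading one of the new table entries (excerpt annotated for illustration):

    static pinmux_enum_t pinmux_data[] = {
    	/* ... */
    	PINMUX_DATA(MMCD0_0_PU_MARK,	/* function being described        */
    		    PORT271_FN1,	/* port 271, alternate function 1  */
    		    PORT271_IN_PU,	/* input with pull-up enabled      */
    		    MSEL4CR_MSEL15_0),	/* module-select bit requirement   */
    	/* ... */
    };

    /* Board code selects it through the matching GPIO function number:
     *	gpio_request(GPIO_FN_MMCD0_0_PU, NULL);
     * as board-ag5evm.c now does for all eight data lines. */
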
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 79612737c5b2..34bbcbfb1706 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -20,6 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/irq.h> 21#include <linux/irq.h>
22#include <linux/bitrev.h> 22#include <linux/bitrev.h>
23#include <linux/console.h>
23#include <asm/system.h> 24#include <asm/system.h>
24#include <asm/io.h> 25#include <asm/io.h>
25#include <asm/tlbflush.h> 26#include <asm/tlbflush.h>
@@ -106,9 +107,8 @@ static int pd_power_down(struct generic_pm_domain *genpd)
106 return 0; 107 return 0;
107} 108}
108 109
109static int pd_power_up(struct generic_pm_domain *genpd) 110static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
110{ 111{
111 struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
112 unsigned int mask = 1 << sh7372_pd->bit_shift; 112 unsigned int mask = 1 << sh7372_pd->bit_shift;
113 unsigned int retry_count; 113 unsigned int retry_count;
114 int ret = 0; 114 int ret = 0;
@@ -123,13 +123,13 @@ static int pd_power_up(struct generic_pm_domain *genpd)
123 123
124 for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) { 124 for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
125 if (!(__raw_readl(SWUCR) & mask)) 125 if (!(__raw_readl(SWUCR) & mask))
126 goto out; 126 break;
127 if (retry_count > PSTR_RETRIES) 127 if (retry_count > PSTR_RETRIES)
128 udelay(PSTR_DELAY_US); 128 udelay(PSTR_DELAY_US);
129 else 129 else
130 cpu_relax(); 130 cpu_relax();
131 } 131 }
132 if (__raw_readl(SWUCR) & mask) 132 if (!retry_count)
133 ret = -EIO; 133 ret = -EIO;
134 134
135 if (!sh7372_pd->no_debug) 135 if (!sh7372_pd->no_debug)
@@ -137,12 +137,17 @@ static int pd_power_up(struct generic_pm_domain *genpd)
137 mask, __raw_readl(PSTR)); 137 mask, __raw_readl(PSTR));
138 138
139 out: 139 out:
140 if (ret == 0 && sh7372_pd->resume) 140 if (ret == 0 && sh7372_pd->resume && do_resume)
141 sh7372_pd->resume(); 141 sh7372_pd->resume();
142 142
143 return ret; 143 return ret;
144} 144}
145 145
146static int pd_power_up(struct generic_pm_domain *genpd)
147{
148 return __pd_power_up(to_sh7372_pd(genpd), true);
149}
150
146static void sh7372_a4r_suspend(void) 151static void sh7372_a4r_suspend(void)
147{ 152{
148 sh7372_intcs_suspend(); 153 sh7372_intcs_suspend();
@@ -174,7 +179,7 @@ void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
174 genpd->active_wakeup = pd_active_wakeup; 179 genpd->active_wakeup = pd_active_wakeup;
175 genpd->power_off = pd_power_down; 180 genpd->power_off = pd_power_down;
176 genpd->power_on = pd_power_up; 181 genpd->power_on = pd_power_up;
177 genpd->power_on(&sh7372_pd->genpd); 182 __pd_power_up(sh7372_pd, false);
178} 183}
179 184
180void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, 185void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
@@ -227,11 +232,23 @@ struct sh7372_pm_domain sh7372_a3sp = {
227 .no_debug = true, 232 .no_debug = true,
228}; 233};
229 234
235static void sh7372_a3sp_init(void)
236{
237 /* serial consoles make use of SCIF hardware located in A3SP,
238 * keep such power domain on if "no_console_suspend" is set.
239 */
240 sh7372_a3sp.stay_on = !console_suspend_enabled;
241}
242
230struct sh7372_pm_domain sh7372_a3sg = { 243struct sh7372_pm_domain sh7372_a3sg = {
231 .bit_shift = 13, 244 .bit_shift = 13,
232}; 245};
233 246
234#endif /* CONFIG_PM */ 247#else /* !CONFIG_PM */
248
249static inline void sh7372_a3sp_init(void) {}
250
251#endif /* !CONFIG_PM */
235 252
236#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE) 253#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
237static int sh7372_do_idle_core_standby(unsigned long unused) 254static int sh7372_do_idle_core_standby(unsigned long unused)
@@ -402,22 +419,18 @@ static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
402 419
403#ifdef CONFIG_CPU_IDLE 420#ifdef CONFIG_CPU_IDLE
404 421
405static void sh7372_cpuidle_setup(struct cpuidle_device *dev) 422static void sh7372_cpuidle_setup(struct cpuidle_driver *drv)
406{ 423{
407 struct cpuidle_state *state; 424 struct cpuidle_state *state = &drv->states[drv->state_count];
408 int i = dev->state_count;
409 425
410 state = &dev->states[i];
411 snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); 426 snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
412 strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN); 427 strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN);
413 state->exit_latency = 10; 428 state->exit_latency = 10;
414 state->target_residency = 20 + 10; 429 state->target_residency = 20 + 10;
415 state->power_usage = 1; /* perhaps not */ 430 state->flags = CPUIDLE_FLAG_TIME_VALID;
416 state->flags = 0; 431 shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_core_standby;
417 state->flags |= CPUIDLE_FLAG_TIME_VALID;
418 shmobile_cpuidle_modes[i] = sh7372_enter_core_standby;
419 432
420 dev->state_count = i + 1; 433 drv->state_count++;
421} 434}
422 435
423static void sh7372_cpuidle_init(void) 436static void sh7372_cpuidle_init(void)
@@ -469,6 +482,8 @@ void __init sh7372_pm_init(void)
469 /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */ 482 /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
470 __raw_writel(0, PDNSEL); 483 __raw_writel(0, PDNSEL);
471 484
485 sh7372_a3sp_init();
486
472 sh7372_suspend_init(); 487 sh7372_suspend_init();
473 sh7372_cpuidle_init(); 488 sh7372_cpuidle_init();
474} 489}
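
The pm-sh7372.c changes above do three things: the power-up path is split into __pd_power_up() with a do_resume flag so domain initialisation can power up without triggering the resume hook, the A3SP domain is forced to stay on when the serial console must survive suspend ("no_console_suspend"), and the PSTR polling loop is reworked to break on success and detect timeout from the retry counter alone. That last idiom in isolation — register and constant names as in the hunk, the wrapper function itself is illustrative:

    static int example_wait_power_up(unsigned int mask)
    {
    	unsigned int retry_count;

    	for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
    		if (!(__raw_readl(SWUCR) & mask))
    			break;			/* switch-up completed */
    		if (retry_count > PSTR_RETRIES)
    			udelay(PSTR_DELAY_US);	/* coarse waits first  */
    		else
    			cpu_relax();		/* then busy-poll      */
    	}

    	/* the counter only reaches zero if the loop never broke out,
    	 * so no second read of SWUCR is needed to detect the timeout */
    	return retry_count ? 0 : -EIO;
    }
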
diff --git a/arch/arm/mach-tegra/board-dt.c b/arch/arm/mach-tegra/board-dt.c
index d368f8dafcfd..74743ad3d2d3 100644
--- a/arch/arm/mach-tegra/board-dt.c
+++ b/arch/arm/mach-tegra/board-dt.c
@@ -101,6 +101,13 @@ static void __init tegra_dt_init(void)
101 101
102 tegra_clk_init_from_table(tegra_dt_clk_init_table); 102 tegra_clk_init_from_table(tegra_dt_clk_init_table);
103 103
104 /*
105 * Finished with the static registrations now; fill in the missing
106 * devices
107 */
108 of_platform_populate(NULL, tegra_dt_match_table,
109 tegra20_auxdata_lookup, NULL);
110
104 for (i = 0; i < ARRAY_SIZE(pinmux_configs); i++) { 111 for (i = 0; i < ARRAY_SIZE(pinmux_configs); i++) {
105 if (of_machine_is_compatible(pinmux_configs[i].machine)) { 112 if (of_machine_is_compatible(pinmux_configs[i].machine)) {
106 pinmux_configs[i].init(); 113 pinmux_configs[i].init();
@@ -110,12 +117,6 @@ static void __init tegra_dt_init(void)
110 117
111 WARN(i == ARRAY_SIZE(pinmux_configs), 118 WARN(i == ARRAY_SIZE(pinmux_configs),
112 "Unknown platform! Pinmuxing not initialized\n"); 119 "Unknown platform! Pinmuxing not initialized\n");
113
114 /*
115 * Finished with the static registrations now; fill in the missing
116 * devices
117 */
118 of_platform_populate(NULL, tegra_dt_match_table, tegra20_auxdata_lookup, NULL);
119} 120}
120 121
121static const char * tegra_dt_board_compat[] = { 122static const char * tegra_dt_board_compat[] = {
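
The board-dt.c hunk above moves of_platform_populate() ahead of the pinmux loop, so the platform devices created from the device tree exist before the per-board pinmux init runs. A minimal sketch of that ordering is below; example_dt_init, example_bus_match and the "simple-bus" compatible are placeholders, not the Tegra tables (the real code passes tegra_dt_match_table and tegra20_auxdata_lookup).

        #include <linux/of.h>
        #include <linux/of_platform.h>

        /* Hypothetical match table: descend into simple memory-mapped buses. */
        static const struct of_device_id example_bus_match[] __initconst = {
                { .compatible = "simple-bus" },
                { /* sentinel */ }
        };

        static void __init example_dt_init(void)
        {
                /* Create platform devices from the DT first ... */
                of_platform_populate(NULL, example_bus_match, NULL, NULL);

                /* ... then run board fixups that expect those devices. */
        }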
diff --git a/arch/arm/mach-tegra/board-harmony-pinmux.c b/arch/arm/mach-tegra/board-harmony-pinmux.c
index e99b45618cd0..7a4a26d5174c 100644
--- a/arch/arm/mach-tegra/board-harmony-pinmux.c
+++ b/arch/arm/mach-tegra/board-harmony-pinmux.c
@@ -16,6 +16,8 @@
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/of.h>
20
19#include <mach/pinmux.h> 21#include <mach/pinmux.h>
20 22
21#include "gpio-names.h" 23#include "gpio-names.h"
@@ -161,7 +163,9 @@ static struct tegra_gpio_table gpio_table[] = {
161 163
162void harmony_pinmux_init(void) 164void harmony_pinmux_init(void)
163{ 165{
164 platform_add_devices(pinmux_devices, ARRAY_SIZE(pinmux_devices)); 166 if (!of_machine_is_compatible("nvidia,tegra20"))
167 platform_add_devices(pinmux_devices,
168 ARRAY_SIZE(pinmux_devices));
165 169
166 tegra_pinmux_config_table(harmony_pinmux, ARRAY_SIZE(harmony_pinmux)); 170 tegra_pinmux_config_table(harmony_pinmux, ARRAY_SIZE(harmony_pinmux));
167 171
diff --git a/arch/arm/mach-tegra/board-paz00-pinmux.c b/arch/arm/mach-tegra/board-paz00-pinmux.c
index fb20894862b0..be30e215f4b7 100644
--- a/arch/arm/mach-tegra/board-paz00-pinmux.c
+++ b/arch/arm/mach-tegra/board-paz00-pinmux.c
@@ -16,6 +16,8 @@
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/of.h>
20
19#include <mach/pinmux.h> 21#include <mach/pinmux.h>
20 22
21#include "gpio-names.h" 23#include "gpio-names.h"
@@ -158,7 +160,9 @@ static struct tegra_gpio_table gpio_table[] = {
158 160
159void paz00_pinmux_init(void) 161void paz00_pinmux_init(void)
160{ 162{
161 platform_add_devices(pinmux_devices, ARRAY_SIZE(pinmux_devices)); 163 if (!of_machine_is_compatible("nvidia,tegra20"))
164 platform_add_devices(pinmux_devices,
165 ARRAY_SIZE(pinmux_devices));
162 166
163 tegra_pinmux_config_table(paz00_pinmux, ARRAY_SIZE(paz00_pinmux)); 167 tegra_pinmux_config_table(paz00_pinmux, ARRAY_SIZE(paz00_pinmux));
164 168
diff --git a/arch/arm/mach-tegra/board-seaboard-pinmux.c b/arch/arm/mach-tegra/board-seaboard-pinmux.c
index fbce31daa3c9..b1c2972f62fe 100644
--- a/arch/arm/mach-tegra/board-seaboard-pinmux.c
+++ b/arch/arm/mach-tegra/board-seaboard-pinmux.c
@@ -16,6 +16,7 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/of.h>
19 20
20#include <mach/pinmux.h> 21#include <mach/pinmux.h>
21#include <mach/pinmux-t2.h> 22#include <mach/pinmux-t2.h>
@@ -191,6 +192,7 @@ static struct tegra_gpio_table common_gpio_table[] = {
191 { .gpio = TEGRA_GPIO_SD2_POWER, .enable = true }, 192 { .gpio = TEGRA_GPIO_SD2_POWER, .enable = true },
192 { .gpio = TEGRA_GPIO_LIDSWITCH, .enable = true }, 193 { .gpio = TEGRA_GPIO_LIDSWITCH, .enable = true },
193 { .gpio = TEGRA_GPIO_POWERKEY, .enable = true }, 194 { .gpio = TEGRA_GPIO_POWERKEY, .enable = true },
195 { .gpio = TEGRA_GPIO_HP_DET, .enable = true },
194 { .gpio = TEGRA_GPIO_ISL29018_IRQ, .enable = true }, 196 { .gpio = TEGRA_GPIO_ISL29018_IRQ, .enable = true },
195 { .gpio = TEGRA_GPIO_CDC_IRQ, .enable = true }, 197 { .gpio = TEGRA_GPIO_CDC_IRQ, .enable = true },
196 { .gpio = TEGRA_GPIO_USB1, .enable = true }, 198 { .gpio = TEGRA_GPIO_USB1, .enable = true },
@@ -218,7 +220,9 @@ static void __init update_pinmux(struct tegra_pingroup_config *newtbl, int size)
218 220
219void __init seaboard_common_pinmux_init(void) 221void __init seaboard_common_pinmux_init(void)
220{ 222{
221 platform_add_devices(pinmux_devices, ARRAY_SIZE(pinmux_devices)); 223 if (!of_machine_is_compatible("nvidia,tegra20"))
224 platform_add_devices(pinmux_devices,
225 ARRAY_SIZE(pinmux_devices));
222 226
223 tegra_pinmux_config_table(seaboard_pinmux, ARRAY_SIZE(seaboard_pinmux)); 227 tegra_pinmux_config_table(seaboard_pinmux, ARRAY_SIZE(seaboard_pinmux));
224 228
diff --git a/arch/arm/mach-tegra/board-trimslice-pinmux.c b/arch/arm/mach-tegra/board-trimslice-pinmux.c
index 4969dd28a04c..7ab719d46da0 100644
--- a/arch/arm/mach-tegra/board-trimslice-pinmux.c
+++ b/arch/arm/mach-tegra/board-trimslice-pinmux.c
@@ -16,6 +16,7 @@
16#include <linux/gpio.h> 16#include <linux/gpio.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/of.h>
19 20
20#include <mach/pinmux.h> 21#include <mach/pinmux.h>
21 22
@@ -157,7 +158,9 @@ static struct tegra_gpio_table gpio_table[] = {
157 158
158void __init trimslice_pinmux_init(void) 159void __init trimslice_pinmux_init(void)
159{ 160{
160 platform_add_devices(pinmux_devices, ARRAY_SIZE(pinmux_devices)); 161 if (!of_machine_is_compatible("nvidia,tegra20"))
162 platform_add_devices(pinmux_devices,
163 ARRAY_SIZE(pinmux_devices));
161 tegra_pinmux_config_table(trimslice_pinmux, ARRAY_SIZE(trimslice_pinmux)); 164 tegra_pinmux_config_table(trimslice_pinmux, ARRAY_SIZE(trimslice_pinmux));
162 tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table)); 165 tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table));
163} 166}
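
The four Tegra pinmux board files above (harmony, paz00, seaboard, trimslice) all gain the same guard: the static pinmux platform devices are only registered when the machine did not boot from a tegra20 device tree, since of_platform_populate() already created them in that case. A small sketch of the guard, with a hypothetical helper name:

        #include <linux/of.h>
        #include <linux/platform_device.h>

        /* Register legacy board devices only on non-DT boots. */
        static void example_register_board_devices(struct platform_device **devs,
                                                   int n)
        {
                if (of_machine_is_compatible("nvidia,tegra20"))
                        return;         /* DT boot: devices come from the tree */

                platform_add_devices(devs, n);
        }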
diff --git a/arch/arm/mach-w90x900/dev.c b/arch/arm/mach-w90x900/dev.c
index 7a1fa6adb7c3..5b0c38abacc1 100644
--- a/arch/arm/mach-w90x900/dev.c
+++ b/arch/arm/mach-w90x900/dev.c
@@ -422,7 +422,7 @@ struct platform_device nuc900_device_kpi = {
422 422
423/* LCD controller*/ 423/* LCD controller*/
424 424
425static struct nuc900fb_display __initdata nuc900_lcd_info[] = { 425static struct nuc900fb_display nuc900_lcd_info[] = {
426 /* Giantplus Technology GPM1040A0 320x240 Color TFT LCD */ 426 /* Giantplus Technology GPM1040A0 320x240 Color TFT LCD */
427 [0] = { 427 [0] = {
428 .type = LCM_DCCS_VA_SRC_RGB565, 428 .type = LCM_DCCS_VA_SRC_RGB565,
@@ -445,7 +445,7 @@ static struct nuc900fb_display __initdata nuc900_lcd_info[] = {
445 }, 445 },
446}; 446};
447 447
448static struct nuc900fb_mach_info nuc900_fb_info __initdata = { 448static struct nuc900fb_mach_info nuc900_fb_info = {
449#if defined(CONFIG_GPM1040A0_320X240) 449#if defined(CONFIG_GPM1040A0_320X240)
450 .displays = &nuc900_lcd_info[0], 450 .displays = &nuc900_lcd_info[0],
451#else 451#else
diff --git a/arch/arm/mach-w90x900/include/mach/mfp.h b/arch/arm/mach-w90x900/include/mach/mfp.h
index 94c0e71617c6..23ef1f573abd 100644
--- a/arch/arm/mach-w90x900/include/mach/mfp.h
+++ b/arch/arm/mach-w90x900/include/mach/mfp.h
@@ -19,6 +19,7 @@
19extern void mfp_set_groupf(struct device *dev); 19extern void mfp_set_groupf(struct device *dev);
20extern void mfp_set_groupc(struct device *dev); 20extern void mfp_set_groupc(struct device *dev);
21extern void mfp_set_groupi(struct device *dev); 21extern void mfp_set_groupi(struct device *dev);
22extern void mfp_set_groupg(struct device *dev); 22extern void mfp_set_groupg(struct device *dev, const char *subname);
23extern void mfp_set_groupd(struct device *dev, const char *subname);
23 24
24#endif /* __ASM_ARCH_MFP_H */ 25#endif /* __ASM_ARCH_MFP_H */
diff --git a/arch/arm/mach-w90x900/include/mach/nuc900_spi.h b/arch/arm/mach-w90x900/include/mach/nuc900_spi.h
index bd94819e314f..2c4e0c128501 100644
--- a/arch/arm/mach-w90x900/include/mach/nuc900_spi.h
+++ b/arch/arm/mach-w90x900/include/mach/nuc900_spi.h
@@ -14,7 +14,7 @@
14#ifndef __ASM_ARCH_SPI_H 14#ifndef __ASM_ARCH_SPI_H
15#define __ASM_ARCH_SPI_H 15#define __ASM_ARCH_SPI_H
16 16
17extern void mfp_set_groupg(struct device *dev); 17extern void mfp_set_groupg(struct device *dev, const char *subname);
18 18
19struct nuc900_spi_info { 19struct nuc900_spi_info {
20 unsigned int num_cs; 20 unsigned int num_cs;
diff --git a/arch/arm/mach-w90x900/mfp.c b/arch/arm/mach-w90x900/mfp.c
index fb7fb627b1a5..9dd74612bb87 100644
--- a/arch/arm/mach-w90x900/mfp.c
+++ b/arch/arm/mach-w90x900/mfp.c
@@ -26,10 +26,8 @@
26#define REG_MFSEL (W90X900_VA_GCR + 0xC) 26#define REG_MFSEL (W90X900_VA_GCR + 0xC)
27 27
28#define GPSELF (0x01 << 1) 28#define GPSELF (0x01 << 1)
29
30#define GPSELC (0x03 << 2) 29#define GPSELC (0x03 << 2)
31#define ENKPI (0x02 << 2) 30#define GPSELD (0x0f << 4)
32#define ENNAND (0x01 << 2)
33 31
34#define GPSELEI0 (0x01 << 26) 32#define GPSELEI0 (0x01 << 26)
35#define GPSELEI1 (0x01 << 27) 33#define GPSELEI1 (0x01 << 27)
@@ -37,11 +35,16 @@
37#define GPIOG0TO1 (0x03 << 14) 35#define GPIOG0TO1 (0x03 << 14)
38#define GPIOG2TO3 (0x03 << 16) 36#define GPIOG2TO3 (0x03 << 16)
39#define GPIOG22TO23 (0x03 << 22) 37#define GPIOG22TO23 (0x03 << 22)
38#define GPIOG18TO20 (0x07 << 18)
40 39
41#define ENSPI (0x0a << 14) 40#define ENSPI (0x0a << 14)
42#define ENI2C0 (0x01 << 14) 41#define ENI2C0 (0x01 << 14)
43#define ENI2C1 (0x01 << 16) 42#define ENI2C1 (0x01 << 16)
44#define ENAC97 (0x02 << 22) 43#define ENAC97 (0x02 << 22)
44#define ENSD1 (0x02 << 18)
45#define ENSD0 (0x0a << 4)
46#define ENKPI (0x02 << 2)
47#define ENNAND (0x01 << 2)
45 48
46static DEFINE_MUTEX(mfp_mutex); 49static DEFINE_MUTEX(mfp_mutex);
47 50
@@ -127,16 +130,19 @@ void mfp_set_groupi(struct device *dev)
127} 130}
128EXPORT_SYMBOL(mfp_set_groupi); 131EXPORT_SYMBOL(mfp_set_groupi);
129 132
130void mfp_set_groupg(struct device *dev) 133void mfp_set_groupg(struct device *dev, const char *subname)
131{ 134{
132 unsigned long mfpen; 135 unsigned long mfpen;
133 const char *dev_id; 136 const char *dev_id;
134 137
135 BUG_ON(!dev); 138 BUG_ON((!dev) && (!subname));
136 139
137 mutex_lock(&mfp_mutex); 140 mutex_lock(&mfp_mutex);
138 141
139 dev_id = dev_name(dev); 142 if (subname != NULL)
143 dev_id = subname;
144 else
145 dev_id = dev_name(dev);
140 146
141 mfpen = __raw_readl(REG_MFSEL); 147 mfpen = __raw_readl(REG_MFSEL);
142 148
@@ -152,6 +158,9 @@ void mfp_set_groupg(struct device *dev)
152 } else if (strcmp(dev_id, "nuc900-audio") == 0) { 158 } else if (strcmp(dev_id, "nuc900-audio") == 0) {
153 mfpen &= ~(GPIOG22TO23); 159 mfpen &= ~(GPIOG22TO23);
154 mfpen |= ENAC97;/*enable AC97*/ 160 mfpen |= ENAC97;/*enable AC97*/
161 } else if (strcmp(dev_id, "nuc900-mmc-port1") == 0) {
162 mfpen &= ~(GPIOG18TO20);
163 mfpen |= (ENSD1 | 0x01);/*enable sd1*/
155 } else { 164 } else {
156 mfpen &= ~(GPIOG0TO1 | GPIOG2TO3);/*GPIOG[3:0]*/ 165 mfpen &= ~(GPIOG0TO1 | GPIOG2TO3);/*GPIOG[3:0]*/
157 } 166 }
@@ -162,3 +171,30 @@ void mfp_set_groupg(struct device *dev)
162} 171}
163EXPORT_SYMBOL(mfp_set_groupg); 172EXPORT_SYMBOL(mfp_set_groupg);
164 173
174void mfp_set_groupd(struct device *dev, const char *subname)
175{
176 unsigned long mfpen;
177 const char *dev_id;
178
179 BUG_ON((!dev) && (!subname));
180
181 mutex_lock(&mfp_mutex);
182
183 if (subname != NULL)
184 dev_id = subname;
185 else
186 dev_id = dev_name(dev);
187
188 mfpen = __raw_readl(REG_MFSEL);
189
190 if (strcmp(dev_id, "nuc900-mmc-port0") == 0) {
191 mfpen &= ~GPSELD;/*enable sd0*/
192 mfpen |= ENSD0;
193 } else
194 mfpen &= (~GPSELD);
195
196 __raw_writel(mfpen, REG_MFSEL);
197
198 mutex_unlock(&mfp_mutex);
199}
200EXPORT_SYMBOL(mfp_set_groupd);
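
With the mfp.c change above, mfp_set_groupg() takes an optional subname and a new mfp_set_groupd() is added, so a caller can select a pin function by string when one device spans several multi-function groups. A hypothetical caller (the function name is made up; the "nuc900-mmc-port*" strings are the ones matched in the hunk):

        #include <linux/platform_device.h>
        #include <mach/mfp.h>

        /* Route the shared group D and group G pins for the two MMC ports
         * by sub-function name instead of relying on the device name alone. */
        static void example_claim_mmc_pins(struct platform_device *pdev)
        {
                mfp_set_groupd(&pdev->dev, "nuc900-mmc-port0");
                mfp_set_groupg(&pdev->dev, "nuc900-mmc-port1");
        }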
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 8ac9e9f84790..b1e192ba8c24 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -61,7 +61,7 @@ static inline void cache_sync(void)
61{ 61{
62 void __iomem *base = l2x0_base; 62 void __iomem *base = l2x0_base;
63 63
64#ifdef CONFIG_ARM_ERRATA_753970 64#ifdef CONFIG_PL310_ERRATA_753970
65 /* write to an unmmapped register */ 65 /* write to an unmmapped register */
66 writel_relaxed(0, base + L2X0_DUMMY_REG); 66 writel_relaxed(0, base + L2X0_DUMMY_REG);
67#else 67#else
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e4e7f6cba1ab..1aa664a1999f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -168,7 +168,7 @@ static int __init consistent_init(void)
168 pte_t *pte; 168 pte_t *pte;
169 int i = 0; 169 int i = 0;
170 unsigned long base = consistent_base; 170 unsigned long base = consistent_base;
171 unsigned long num_ptes = (CONSISTENT_END - base) >> PGDIR_SHIFT; 171 unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
172 172
173 consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); 173 consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
174 if (!consistent_pte) { 174 if (!consistent_pte) {
@@ -332,6 +332,15 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
332 struct page *page; 332 struct page *page;
333 void *addr; 333 void *addr;
334 334
335 /*
336 * Following is a work-around (a.k.a. hack) to prevent pages
337 * with __GFP_COMP being passed to split_page() which cannot
338 * handle them. The real problem is that this flag probably
339 * should be 0 on ARM as it is not supported on this
340 * platform; see CONFIG_HUGETLBFS.
341 */
342 gfp &= ~(__GFP_COMP);
343
335 *handle = ~0; 344 *handle = ~0;
336 size = PAGE_ALIGN(size); 345 size = PAGE_ALIGN(size);
337 346
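
The comment added in __dma_alloc() above explains the workaround: split_page() cannot handle compound pages, so __GFP_COMP is masked off before the allocation. The same defensive pattern in isolation, with a hypothetical helper name:

        #include <linux/gfp.h>
        #include <linux/mm.h>

        /* Strip __GFP_COMP so pages later handed to split_page() are never
         * compound pages. */
        static struct page *example_alloc_splittable(size_t size, gfp_t gfp)
        {
                gfp &= ~__GFP_COMP;
                return alloc_pages(gfp, get_order(size));
        }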
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 74be05f3e03a..44b628e4d6ea 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -9,8 +9,7 @@
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/personality.h> 10#include <linux/personality.h>
11#include <linux/random.h> 11#include <linux/random.h>
12#include <asm/cputype.h> 12#include <asm/cachetype.h>
13#include <asm/system.h>
14 13
15#define COLOUR_ALIGN(addr,pgoff) \ 14#define COLOUR_ALIGN(addr,pgoff) \
16 ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ 15 ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
@@ -32,25 +31,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
32 struct mm_struct *mm = current->mm; 31 struct mm_struct *mm = current->mm;
33 struct vm_area_struct *vma; 32 struct vm_area_struct *vma;
34 unsigned long start_addr; 33 unsigned long start_addr;
35#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) 34 int do_align = 0;
36 unsigned int cache_type; 35 int aliasing = cache_is_vipt_aliasing();
37 int do_align = 0, aliasing = 0;
38 36
39 /* 37 /*
40 * We only need to do colour alignment if either the I or D 38 * We only need to do colour alignment if either the I or D
41 * caches alias. This is indicated by bits 9 and 21 of the 39 * caches alias.
42 * cache type register.
43 */ 40 */
44 cache_type = read_cpuid_cachetype(); 41 if (aliasing)
45 if (cache_type != read_cpuid_id()) { 42 do_align = filp || (flags & MAP_SHARED);
46 aliasing = (cache_type | cache_type >> 12) & (1 << 11);
47 if (aliasing)
48 do_align = filp || flags & MAP_SHARED;
49 }
50#else
51#define do_align 0
52#define aliasing 0
53#endif
54 43
55 /* 44 /*
56 * We enforce the MAP_FIXED case. 45 * We enforce the MAP_FIXED case.
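
The mmap.c hunk above drops the CPU_V6-only cache-type-register probe and uses the generic cache_is_vipt_aliasing() helper at run time. The resulting decision, condensed into a hypothetical function:

        #include <linux/fs.h>
        #include <linux/mman.h>
        #include <asm/cachetype.h>

        /* Colour-align shared mappings only when the caches actually alias. */
        static int example_need_colour_align(struct file *filp,
                                             unsigned long flags)
        {
                if (!cache_is_vipt_aliasing())
                        return 0;

                return filp || (flags & MAP_SHARED);
        }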
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index a08a95107a63..b3a1f2b3ada3 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -10,7 +10,7 @@ choice
10 10
11config ARCH_IMX_V4_V5 11config ARCH_IMX_V4_V5
12 bool "i.MX1, i.MX21, i.MX25, i.MX27" 12 bool "i.MX1, i.MX21, i.MX25, i.MX27"
13 select AUTO_ZRELADDR 13 select AUTO_ZRELADDR if !ZBOOT_ROM
14 select ARM_PATCH_PHYS_VIRT 14 select ARM_PATCH_PHYS_VIRT
15 help 15 help
16 This enables support for systems based on the Freescale i.MX ARMv4 16 This enables support for systems based on the Freescale i.MX ARMv4
@@ -26,7 +26,7 @@ config ARCH_IMX_V6_V7
26 26
27config ARCH_MX5 27config ARCH_MX5
28 bool "i.MX50, i.MX51, i.MX53" 28 bool "i.MX50, i.MX51, i.MX53"
29 select AUTO_ZRELADDR 29 select AUTO_ZRELADDR if !ZBOOT_ROM
30 select ARM_PATCH_PHYS_VIRT 30 select ARM_PATCH_PHYS_VIRT
31 help 31 help
32 This enables support for machines using Freescale's i.MX50 and i.MX53 32 This enables support for machines using Freescale's i.MX50 and i.MX53
diff --git a/arch/arm/plat-mxc/avic.c b/arch/arm/plat-mxc/avic.c
index 8875fb415f68..55f15699a383 100644
--- a/arch/arm/plat-mxc/avic.c
+++ b/arch/arm/plat-mxc/avic.c
@@ -22,6 +22,7 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <mach/common.h> 23#include <mach/common.h>
24#include <asm/mach/irq.h> 24#include <asm/mach/irq.h>
25#include <asm/exception.h>
25#include <mach/hardware.h> 26#include <mach/hardware.h>
26 27
27#include "irq-common.h" 28#include "irq-common.h"
diff --git a/arch/arm/plat-mxc/cpufreq.c b/arch/arm/plat-mxc/cpufreq.c
index 74aac96cda20..adbff706ef6f 100644
--- a/arch/arm/plat-mxc/cpufreq.c
+++ b/arch/arm/plat-mxc/cpufreq.c
@@ -17,6 +17,7 @@
17 * the CPU clock speed on the fly. 17 * the CPU clock speed on the fly.
18 */ 18 */
19 19
20#include <linux/module.h>
20#include <linux/cpufreq.h> 21#include <linux/cpufreq.h>
21#include <linux/clk.h> 22#include <linux/clk.h>
22#include <linux/err.h> 23#include <linux/err.h>
diff --git a/arch/arm/plat-mxc/gic.c b/arch/arm/plat-mxc/gic.c
index b3b8eed263b8..12f8f8109010 100644
--- a/arch/arm/plat-mxc/gic.c
+++ b/arch/arm/plat-mxc/gic.c
@@ -28,21 +28,14 @@ asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
28 if (irqnr == 1023) 28 if (irqnr == 1023)
29 break; 29 break;
30 30
31 if (irqnr > 29 && irqnr < 1021) 31 if (irqnr > 15 && irqnr < 1021)
32 handle_IRQ(irqnr, regs); 32 handle_IRQ(irqnr, regs);
33#ifdef CONFIG_SMP 33#ifdef CONFIG_SMP
34 else if (irqnr < 16) { 34 else {
35 writel_relaxed(irqstat, gic_cpu_base_addr + 35 writel_relaxed(irqstat, gic_cpu_base_addr +
36 GIC_CPU_EOI); 36 GIC_CPU_EOI);
37 handle_IPI(irqnr, regs); 37 handle_IPI(irqnr, regs);
38 } 38 }
39#endif 39#endif
40#ifdef CONFIG_LOCAL_TIMERS
41 else if (irqnr == 29) {
42 writel_relaxed(irqstat, gic_cpu_base_addr +
43 GIC_CPU_EOI);
44 handle_local_timer(regs);
45 }
46#endif
47 } while (1); 40 } while (1);
48} 41}
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index 83b745a5e1b7..c75f254abd85 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -85,7 +85,6 @@ enum mxc_cpu_pwr_mode {
85}; 85};
86 86
87extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode); 87extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode);
88extern void (*imx_idle)(void);
89extern void imx_print_silicon_rev(const char *cpu, int srev); 88extern void imx_print_silicon_rev(const char *cpu, int srev);
90 89
91void avic_handle_irq(struct pt_regs *); 90void avic_handle_irq(struct pt_regs *);
@@ -133,4 +132,5 @@ extern void imx53_qsb_common_init(void);
133extern void imx53_smd_common_init(void); 132extern void imx53_smd_common_init(void);
134extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode); 133extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
135extern void imx6q_pm_init(void); 134extern void imx6q_pm_init(void);
135extern void imx6q_clock_map_io(void);
136#endif 136#endif
diff --git a/arch/arm/plat-mxc/include/mach/entry-macro.S b/arch/arm/plat-mxc/include/mach/entry-macro.S
index 9fe0dfcf4e7e..ca5cf26a04b1 100644
--- a/arch/arm/plat-mxc/include/mach/entry-macro.S
+++ b/arch/arm/plat-mxc/include/mach/entry-macro.S
@@ -25,6 +25,3 @@
25 25
26 .macro test_for_ipi, irqnr, irqstat, base, tmp 26 .macro test_for_ipi, irqnr, irqstat, base, tmp
27 .endm 27 .endm
28
29 .macro test_for_ltirq, irqnr, irqstat, base, tmp
30 .endm
diff --git a/arch/arm/plat-mxc/include/mach/mxc.h b/arch/arm/plat-mxc/include/mach/mxc.h
index 00a78193c681..a4d36d601d55 100644
--- a/arch/arm/plat-mxc/include/mach/mxc.h
+++ b/arch/arm/plat-mxc/include/mach/mxc.h
@@ -50,20 +50,6 @@
50#define IMX_CHIP_REVISION_3_3 0x33 50#define IMX_CHIP_REVISION_3_3 0x33
51#define IMX_CHIP_REVISION_UNKNOWN 0xff 51#define IMX_CHIP_REVISION_UNKNOWN 0xff
52 52
53#define IMX_CHIP_REVISION_1_0_STRING "1.0"
54#define IMX_CHIP_REVISION_1_1_STRING "1.1"
55#define IMX_CHIP_REVISION_1_2_STRING "1.2"
56#define IMX_CHIP_REVISION_1_3_STRING "1.3"
57#define IMX_CHIP_REVISION_2_0_STRING "2.0"
58#define IMX_CHIP_REVISION_2_1_STRING "2.1"
59#define IMX_CHIP_REVISION_2_2_STRING "2.2"
60#define IMX_CHIP_REVISION_2_3_STRING "2.3"
61#define IMX_CHIP_REVISION_3_0_STRING "3.0"
62#define IMX_CHIP_REVISION_3_1_STRING "3.1"
63#define IMX_CHIP_REVISION_3_2_STRING "3.2"
64#define IMX_CHIP_REVISION_3_3_STRING "3.3"
65#define IMX_CHIP_REVISION_UNKNOWN_STRING "unknown"
66
67#ifndef __ASSEMBLY__ 53#ifndef __ASSEMBLY__
68extern unsigned int __mxc_cpu_type; 54extern unsigned int __mxc_cpu_type;
69#endif 55#endif
diff --git a/arch/arm/plat-mxc/include/mach/system.h b/arch/arm/plat-mxc/include/mach/system.h
index cf88b3593fba..b9895d250167 100644
--- a/arch/arm/plat-mxc/include/mach/system.h
+++ b/arch/arm/plat-mxc/include/mach/system.h
@@ -17,14 +17,9 @@
17#ifndef __ASM_ARCH_MXC_SYSTEM_H__ 17#ifndef __ASM_ARCH_MXC_SYSTEM_H__
18#define __ASM_ARCH_MXC_SYSTEM_H__ 18#define __ASM_ARCH_MXC_SYSTEM_H__
19 19
20extern void (*imx_idle)(void);
21
22static inline void arch_idle(void) 20static inline void arch_idle(void)
23{ 21{
24 if (imx_idle != NULL) 22 cpu_do_idle();
25 (imx_idle)();
26 else
27 cpu_do_idle();
28} 23}
29 24
30void arch_reset(char mode, const char *cmd); 25void arch_reset(char mode, const char *cmd);
diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
index 42d74ea59084..845de59f07ed 100644
--- a/arch/arm/plat-mxc/pwm.c
+++ b/arch/arm/plat-mxc/pwm.c
@@ -32,6 +32,9 @@
32#define MX3_PWMSAR 0x0C /* PWM Sample Register */ 32#define MX3_PWMSAR 0x0C /* PWM Sample Register */
33#define MX3_PWMPR 0x10 /* PWM Period Register */ 33#define MX3_PWMPR 0x10 /* PWM Period Register */
34#define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4) 34#define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4)
35#define MX3_PWMCR_DOZEEN (1 << 24)
36#define MX3_PWMCR_WAITEN (1 << 23)
37#define MX3_PWMCR_DBGEN (1 << 22)
35#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16) 38#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
36#define MX3_PWMCR_CLKSRC_IPG (1 << 16) 39#define MX3_PWMCR_CLKSRC_IPG (1 << 16)
37#define MX3_PWMCR_EN (1 << 0) 40#define MX3_PWMCR_EN (1 << 0)
@@ -77,7 +80,9 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
77 writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR); 80 writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
78 writel(period_cycles, pwm->mmio_base + MX3_PWMPR); 81 writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
79 82
80 cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN; 83 cr = MX3_PWMCR_PRESCALER(prescale) |
84 MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
85 MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
81 86
82 if (cpu_is_mx25()) 87 if (cpu_is_mx25())
83 cr |= MX3_PWMCR_CLKSRC_IPG; 88 cr |= MX3_PWMCR_CLKSRC_IPG;
diff --git a/arch/arm/plat-mxc/system.c b/arch/arm/plat-mxc/system.c
index 9dad8dcc2ea9..d65fb31a55ca 100644
--- a/arch/arm/plat-mxc/system.c
+++ b/arch/arm/plat-mxc/system.c
@@ -21,6 +21,7 @@
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/module.h>
24 25
25#include <mach/hardware.h> 26#include <mach/hardware.h>
26#include <mach/common.h> 27#include <mach/common.h>
@@ -28,8 +29,8 @@
28#include <asm/system.h> 29#include <asm/system.h>
29#include <asm/mach-types.h> 30#include <asm/mach-types.h>
30 31
31void (*imx_idle)(void) = NULL;
32void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int) = NULL; 32void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int) = NULL;
33EXPORT_SYMBOL_GPL(imx_ioremap);
33 34
34static void __iomem *wdog_base; 35static void __iomem *wdog_base;
35 36
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index e993a184189a..a3c164c7ba82 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -17,6 +17,7 @@
17#include <linux/io.h> 17#include <linux/io.h>
18 18
19#include <asm/mach/irq.h> 19#include <asm/mach/irq.h>
20#include <asm/exception.h>
20 21
21#include <mach/hardware.h> 22#include <mach/hardware.h>
22#include <mach/common.h> 23#include <mach/common.h>
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 197ca03c3f7d..eb73ab40e955 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -165,8 +165,8 @@ struct dpll_data {
165 u8 auto_recal_bit; 165 u8 auto_recal_bit;
166 u8 recal_en_bit; 166 u8 recal_en_bit;
167 u8 recal_st_bit; 167 u8 recal_st_bit;
168 u8 flags;
169# endif 168# endif
169 u8 flags;
170}; 170};
171 171
172#endif 172#endif
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index c50df4814f6f..3ff3e36580f2 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -30,6 +30,7 @@
30#include <linux/delay.h> 30#include <linux/delay.h>
31 31
32#include <plat/i2c.h> 32#include <plat/i2c.h>
33#include <plat/omap_hwmod.h>
33 34
34struct sys_timer; 35struct sys_timer;
35 36
@@ -55,6 +56,8 @@ void am35xx_init_early(void);
55void ti816x_init_early(void); 56void ti816x_init_early(void);
56void omap4430_init_early(void); 57void omap4430_init_early(void);
57 58
59extern int omap_dss_reset(struct omap_hwmod *);
60
58void omap_sram_init(void); 61void omap_sram_init(void);
59 62
60/* 63/*
diff --git a/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c b/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
index a9276667c2fb..c7adad0e8de0 100644
--- a/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
+++ b/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
@@ -12,7 +12,7 @@
12*/ 12*/
13 13
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/module.h> 15#include <linux/export.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/ioport.h> 17#include <linux/ioport.h>
18#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c
index e1cbc728c775..c8bec9c7655d 100644
--- a/arch/arm/plat-s5p/sysmmu.c
+++ b/arch/arm/plat-s5p/sysmmu.c
@@ -11,6 +11,7 @@
11#include <linux/io.h> 11#include <linux/io.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/export.h>
14 15
15#include <asm/pgtable.h> 16#include <asm/pgtable.h>
16 17
diff --git a/arch/arm/plat-samsung/dev-backlight.c b/arch/arm/plat-samsung/dev-backlight.c
index e657305644cc..a976c023b286 100644
--- a/arch/arm/plat-samsung/dev-backlight.c
+++ b/arch/arm/plat-samsung/dev-backlight.c
@@ -15,7 +15,6 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/pwm_backlight.h> 17#include <linux/pwm_backlight.h>
18#include <linux/slab.h>
19 18
20#include <plat/devs.h> 19#include <plat/devs.h>
21#include <plat/gpio-cfg.h> 20#include <plat/gpio-cfg.h>
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index d48245bb02b3..df8155b9d4d1 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -24,6 +24,8 @@
24#ifndef __PLAT_GPIO_CFG_H 24#ifndef __PLAT_GPIO_CFG_H
25#define __PLAT_GPIO_CFG_H __FILE__ 25#define __PLAT_GPIO_CFG_H __FILE__
26 26
27#include<linux/types.h>
28
27typedef unsigned int __bitwise__ samsung_gpio_pull_t; 29typedef unsigned int __bitwise__ samsung_gpio_pull_t;
28typedef unsigned int __bitwise__ s5p_gpio_drvstr_t; 30typedef unsigned int __bitwise__ s5p_gpio_drvstr_t;
29 31
diff --git a/arch/arm/plat-samsung/pd.c b/arch/arm/plat-samsung/pd.c
index efe1d564473e..312b510d86b7 100644
--- a/arch/arm/plat-samsung/pd.c
+++ b/arch/arm/plat-samsung/pd.c
@@ -11,7 +11,7 @@
11*/ 11*/
12 12
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/module.h> 14#include <linux/export.h>
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
diff --git a/arch/arm/plat-samsung/pwm.c b/arch/arm/plat-samsung/pwm.c
index dc1185dcf80d..c559d8438c70 100644
--- a/arch/arm/plat-samsung/pwm.c
+++ b/arch/arm/plat-samsung/pwm.c
@@ -11,7 +11,7 @@
11 * the Free Software Foundation; either version 2 of the License. 11 * the Free Software Foundation; either version 2 of the License.
12*/ 12*/
13 13
14#include <linux/module.h> 14#include <linux/export.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 5bdeef969847..ccbe16f47227 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -1123,5 +1123,6 @@ blissc MACH_BLISSC BLISSC 3491
1123thales_adc MACH_THALES_ADC THALES_ADC 3492 1123thales_adc MACH_THALES_ADC THALES_ADC 3492
1124ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493 1124ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493
1125atdgp318 MACH_ATDGP318 ATDGP318 3494 1125atdgp318 MACH_ATDGP318 ATDGP318 3494
1126m28evk MACH_M28EVK M28EVK 3613
1126smdk4212 MACH_SMDK4212 SMDK4212 3638 1127smdk4212 MACH_SMDK4212 SMDK4212 3638
1127smdk4412 MACH_SMDK4412 SMDK4412 3765 1128smdk4412 MACH_SMDK4412 SMDK4412 3765
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
index 7fd0ec7b5b0f..ecacdf34768b 100644
--- a/arch/blackfin/include/asm/bfin_serial.h
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -32,6 +32,8 @@ struct work_struct;
32struct bfin_serial_port { 32struct bfin_serial_port {
33 struct uart_port port; 33 struct uart_port port;
34 unsigned int old_status; 34 unsigned int old_status;
35 int tx_irq;
36 int rx_irq;
35 int status_irq; 37 int status_irq;
36#ifndef BFIN_UART_BF54X_STYLE 38#ifndef BFIN_UART_BF54X_STYLE
37 unsigned int lsr; 39 unsigned int lsr;
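
The bfin_serial.h change above adds separate tx_irq and rx_irq fields, and every Blackfin board file below converts its UART resource table from a single IRQ_UARTx_RX..RX+1 range into two single-IRQ entries. A sketch of the layout those hunks converge on (array name hypothetical, memory resource omitted for brevity; the IRQ_UART0_* macros come from the machine headers):

        #include <linux/ioport.h>

        static struct resource example_uart0_resources[] = {
                {
                        .start = IRQ_UART0_TX,
                        .end   = IRQ_UART0_TX,
                        .flags = IORESOURCE_IRQ,
                },
                {
                        .start = IRQ_UART0_RX,
                        .end   = IRQ_UART0_RX,
                        .flags = IORESOURCE_IRQ,
                },
        };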
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index 1082e49f7a9f..d1c0c0cff3ef 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -373,8 +373,13 @@ static struct resource bfin_uart0_resources[] = {
373 .flags = IORESOURCE_MEM, 373 .flags = IORESOURCE_MEM,
374 }, 374 },
375 { 375 {
376 .start = IRQ_UART0_TX,
377 .end = IRQ_UART0_TX,
378 .flags = IORESOURCE_IRQ,
379 },
380 {
376 .start = IRQ_UART0_RX, 381 .start = IRQ_UART0_RX,
377 .end = IRQ_UART0_RX+1, 382 .end = IRQ_UART0_RX,
378 .flags = IORESOURCE_IRQ, 383 .flags = IORESOURCE_IRQ,
379 }, 384 },
380 { 385 {
@@ -416,8 +421,13 @@ static struct resource bfin_uart1_resources[] = {
416 .flags = IORESOURCE_MEM, 421 .flags = IORESOURCE_MEM,
417 }, 422 },
418 { 423 {
424 .start = IRQ_UART1_TX,
425 .end = IRQ_UART1_TX,
426 .flags = IORESOURCE_IRQ,
427 },
428 {
419 .start = IRQ_UART1_RX, 429 .start = IRQ_UART1_RX,
420 .end = IRQ_UART1_RX+1, 430 .end = IRQ_UART1_RX,
421 .flags = IORESOURCE_IRQ, 431 .flags = IORESOURCE_IRQ,
422 }, 432 },
423 { 433 {
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
index 55c127908815..5470bf89e52e 100644
--- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -309,8 +309,13 @@ static struct resource bfin_uart0_resources[] = {
309 .flags = IORESOURCE_MEM, 309 .flags = IORESOURCE_MEM,
310 }, 310 },
311 { 311 {
312 .start = IRQ_UART0_TX,
313 .end = IRQ_UART0_TX,
314 .flags = IORESOURCE_IRQ,
315 },
316 {
312 .start = IRQ_UART0_RX, 317 .start = IRQ_UART0_RX,
313 .end = IRQ_UART0_RX+1, 318 .end = IRQ_UART0_RX,
314 .flags = IORESOURCE_IRQ, 319 .flags = IORESOURCE_IRQ,
315 }, 320 },
316 { 321 {
@@ -352,8 +357,13 @@ static struct resource bfin_uart1_resources[] = {
352 .flags = IORESOURCE_MEM, 357 .flags = IORESOURCE_MEM,
353 }, 358 },
354 { 359 {
360 .start = IRQ_UART1_TX,
361 .end = IRQ_UART1_TX,
362 .flags = IORESOURCE_IRQ,
363 },
364 {
355 .start = IRQ_UART1_RX, 365 .start = IRQ_UART1_RX,
356 .end = IRQ_UART1_RX+1, 366 .end = IRQ_UART1_RX,
357 .flags = IORESOURCE_IRQ, 367 .flags = IORESOURCE_IRQ,
358 }, 368 },
359 { 369 {
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index 8d65d476f118..5bc6938157ad 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -381,8 +381,13 @@ static struct resource bfin_uart0_resources[] = {
381 .flags = IORESOURCE_MEM, 381 .flags = IORESOURCE_MEM,
382 }, 382 },
383 { 383 {
384 .start = IRQ_UART0_TX,
385 .end = IRQ_UART0_TX,
386 .flags = IORESOURCE_IRQ,
387 },
388 {
384 .start = IRQ_UART0_RX, 389 .start = IRQ_UART0_RX,
385 .end = IRQ_UART0_RX+1, 390 .end = IRQ_UART0_RX,
386 .flags = IORESOURCE_IRQ, 391 .flags = IORESOURCE_IRQ,
387 }, 392 },
388 { 393 {
@@ -424,8 +429,13 @@ static struct resource bfin_uart1_resources[] = {
424 .flags = IORESOURCE_MEM, 429 .flags = IORESOURCE_MEM,
425 }, 430 },
426 { 431 {
432 .start = IRQ_UART1_TX,
433 .end = IRQ_UART1_TX,
434 .flags = IORESOURCE_IRQ,
435 },
436 {
427 .start = IRQ_UART1_RX, 437 .start = IRQ_UART1_RX,
428 .end = IRQ_UART1_RX+1, 438 .end = IRQ_UART1_RX,
429 .flags = IORESOURCE_IRQ, 439 .flags = IORESOURCE_IRQ,
430 }, 440 },
431 { 441 {
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 6410fc1af8ed..cd289698b4dd 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/device.h> 10#include <linux/device.h>
11#include <linux/export.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12#include <linux/mtd/mtd.h> 13#include <linux/mtd/mtd.h>
13#include <linux/mtd/partitions.h> 14#include <linux/mtd/partitions.h>
@@ -539,8 +540,13 @@ static struct resource bfin_uart0_resources[] = {
539 .flags = IORESOURCE_MEM, 540 .flags = IORESOURCE_MEM,
540 }, 541 },
541 { 542 {
543 .start = IRQ_UART0_TX,
544 .end = IRQ_UART0_TX,
545 .flags = IORESOURCE_IRQ,
546 },
547 {
542 .start = IRQ_UART0_RX, 548 .start = IRQ_UART0_RX,
543 .end = IRQ_UART0_RX+1, 549 .end = IRQ_UART0_RX,
544 .flags = IORESOURCE_IRQ, 550 .flags = IORESOURCE_IRQ,
545 }, 551 },
546 { 552 {
@@ -582,8 +588,13 @@ static struct resource bfin_uart1_resources[] = {
582 .flags = IORESOURCE_MEM, 588 .flags = IORESOURCE_MEM,
583 }, 589 },
584 { 590 {
591 .start = IRQ_UART1_TX,
592 .end = IRQ_UART1_TX,
593 .flags = IORESOURCE_IRQ,
594 },
595 {
585 .start = IRQ_UART1_RX, 596 .start = IRQ_UART1_RX,
586 .end = IRQ_UART1_RX+1, 597 .end = IRQ_UART1_RX,
587 .flags = IORESOURCE_IRQ, 598 .flags = IORESOURCE_IRQ,
588 }, 599 },
589 { 600 {
@@ -801,7 +812,6 @@ static struct platform_device bfin_sport1_uart_device = {
801#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 812#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
802#include <linux/input.h> 813#include <linux/input.h>
803#include <linux/gpio_keys.h> 814#include <linux/gpio_keys.h>
804#include <linux/export.h>
805 815
806static struct gpio_keys_button bfin_gpio_keys_table[] = { 816static struct gpio_keys_button bfin_gpio_keys_table[] = {
807 {BTN_0, GPIO_PF14, 1, "gpio-keys: BTN0"}, 817 {BTN_0, GPIO_PF14, 1, "gpio-keys: BTN0"},
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 64f7278aba53..9f792eafd1cc 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/export.h>
10#include <linux/platform_device.h> 11#include <linux/platform_device.h>
11#include <linux/mtd/mtd.h> 12#include <linux/mtd/mtd.h>
12#include <linux/mtd/partitions.h> 13#include <linux/mtd/partitions.h>
@@ -417,8 +418,13 @@ static struct resource bfin_uart0_resources[] = {
417 .flags = IORESOURCE_MEM, 418 .flags = IORESOURCE_MEM,
418 }, 419 },
419 { 420 {
421 .start = IRQ_UART0_TX,
422 .end = IRQ_UART0_TX,
423 .flags = IORESOURCE_IRQ,
424 },
425 {
420 .start = IRQ_UART0_RX, 426 .start = IRQ_UART0_RX,
421 .end = IRQ_UART0_RX+1, 427 .end = IRQ_UART0_RX,
422 .flags = IORESOURCE_IRQ, 428 .flags = IORESOURCE_IRQ,
423 }, 429 },
424 { 430 {
@@ -460,8 +466,13 @@ static struct resource bfin_uart1_resources[] = {
460 .flags = IORESOURCE_MEM, 466 .flags = IORESOURCE_MEM,
461 }, 467 },
462 { 468 {
469 .start = IRQ_UART1_TX,
470 .end = IRQ_UART1_TX,
471 .flags = IORESOURCE_IRQ,
472 },
473 {
463 .start = IRQ_UART1_RX, 474 .start = IRQ_UART1_RX,
464 .end = IRQ_UART1_RX+1, 475 .end = IRQ_UART1_RX,
465 .flags = IORESOURCE_IRQ, 476 .flags = IORESOURCE_IRQ,
466 }, 477 },
467 { 478 {
@@ -674,7 +685,6 @@ static struct platform_device bfin_sport1_uart_device = {
674#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 685#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
675#include <linux/input.h> 686#include <linux/input.h>
676#include <linux/gpio_keys.h> 687#include <linux/gpio_keys.h>
677#include <linux/export.h>
678 688
679static struct gpio_keys_button bfin_gpio_keys_table[] = { 689static struct gpio_keys_button bfin_gpio_keys_table[] = {
680 {BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"}, 690 {BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"},
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index e4c6a122b66c..3ecafff5d2ef 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -711,8 +711,13 @@ static struct resource bfin_uart0_resources[] = {
711 .flags = IORESOURCE_MEM, 711 .flags = IORESOURCE_MEM,
712 }, 712 },
713 { 713 {
714 .start = IRQ_UART0_TX,
715 .end = IRQ_UART0_TX,
716 .flags = IORESOURCE_IRQ,
717 },
718 {
714 .start = IRQ_UART0_RX, 719 .start = IRQ_UART0_RX,
715 .end = IRQ_UART0_RX+1, 720 .end = IRQ_UART0_RX,
716 .flags = IORESOURCE_IRQ, 721 .flags = IORESOURCE_IRQ,
717 }, 722 },
718 { 723 {
@@ -754,8 +759,13 @@ static struct resource bfin_uart1_resources[] = {
754 .flags = IORESOURCE_MEM, 759 .flags = IORESOURCE_MEM,
755 }, 760 },
756 { 761 {
762 .start = IRQ_UART1_TX,
763 .end = IRQ_UART1_TX,
764 .flags = IORESOURCE_IRQ,
765 },
766 {
757 .start = IRQ_UART1_RX, 767 .start = IRQ_UART1_RX,
758 .end = IRQ_UART1_RX+1, 768 .end = IRQ_UART1_RX,
759 .flags = IORESOURCE_IRQ, 769 .flags = IORESOURCE_IRQ,
760 }, 770 },
761 { 771 {
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index 76dbc03a8d4d..3a92c4318d2d 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -496,8 +496,13 @@ static struct resource bfin_uart0_resources[] = {
496 .flags = IORESOURCE_MEM, 496 .flags = IORESOURCE_MEM,
497 }, 497 },
498 { 498 {
499 .start = IRQ_UART0_TX,
500 .end = IRQ_UART0_TX,
501 .flags = IORESOURCE_IRQ,
502 },
503 {
499 .start = IRQ_UART0_RX, 504 .start = IRQ_UART0_RX,
500 .end = IRQ_UART0_RX+1, 505 .end = IRQ_UART0_RX,
501 .flags = IORESOURCE_IRQ, 506 .flags = IORESOURCE_IRQ,
502 }, 507 },
503 { 508 {
@@ -540,8 +545,13 @@ static struct resource bfin_uart1_resources[] = {
540 .flags = IORESOURCE_MEM, 545 .flags = IORESOURCE_MEM,
541 }, 546 },
542 { 547 {
548 .start = IRQ_UART1_TX,
549 .end = IRQ_UART1_TX,
550 .flags = IORESOURCE_IRQ,
551 },
552 {
543 .start = IRQ_UART1_RX, 553 .start = IRQ_UART1_RX,
544 .end = IRQ_UART1_RX+1, 554 .end = IRQ_UART1_RX,
545 .flags = IORESOURCE_IRQ, 555 .flags = IORESOURCE_IRQ,
546 }, 556 },
547 { 557 {
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 5da5787fc4ef..47cadd316e76 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -238,8 +238,13 @@ static struct resource bfin_uart0_resources[] = {
238 .flags = IORESOURCE_MEM, 238 .flags = IORESOURCE_MEM,
239 }, 239 },
240 { 240 {
241 .start = IRQ_UART0_TX,
242 .end = IRQ_UART0_TX,
243 .flags = IORESOURCE_IRQ,
244 },
245 {
241 .start = IRQ_UART0_RX, 246 .start = IRQ_UART0_RX,
242 .end = IRQ_UART0_RX + 1, 247 .end = IRQ_UART0_RX,
243 .flags = IORESOURCE_IRQ, 248 .flags = IORESOURCE_IRQ,
244 }, 249 },
245 { 250 {
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index b0ec825fb4ec..18817d57c7a1 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -193,8 +193,13 @@ static struct resource bfin_uart0_resources[] = {
193 .flags = IORESOURCE_MEM, 193 .flags = IORESOURCE_MEM,
194 }, 194 },
195 { 195 {
196 .start = IRQ_UART0_TX,
197 .end = IRQ_UART0_TX,
198 .flags = IORESOURCE_IRQ,
199 },
200 {
196 .start = IRQ_UART0_RX, 201 .start = IRQ_UART0_RX,
197 .end = IRQ_UART0_RX + 1, 202 .end = IRQ_UART0_RX,
198 .flags = IORESOURCE_IRQ, 203 .flags = IORESOURCE_IRQ,
199 }, 204 },
200 { 205 {
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index 14f54a31e74c..2c8f30ef6a7b 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -221,8 +221,13 @@ static struct resource bfin_uart0_resources[] = {
221 .flags = IORESOURCE_MEM, 221 .flags = IORESOURCE_MEM,
222 }, 222 },
223 { 223 {
224 .start = IRQ_UART0_TX,
225 .end = IRQ_UART0_TX,
226 .flags = IORESOURCE_IRQ,
227 },
228 {
224 .start = IRQ_UART0_RX, 229 .start = IRQ_UART0_RX,
225 .end = IRQ_UART0_RX + 1, 230 .end = IRQ_UART0_RX,
226 .flags = IORESOURCE_IRQ, 231 .flags = IORESOURCE_IRQ,
227 }, 232 },
228 { 233 {
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index ecd2801f050d..144556e14499 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -292,8 +292,13 @@ static struct resource bfin_uart0_resources[] = {
292 .flags = IORESOURCE_MEM, 292 .flags = IORESOURCE_MEM,
293 }, 293 },
294 { 294 {
295 .start = IRQ_UART0_TX,
296 .end = IRQ_UART0_TX,
297 .flags = IORESOURCE_IRQ,
298 },
299 {
295 .start = IRQ_UART0_RX, 300 .start = IRQ_UART0_RX,
296 .end = IRQ_UART0_RX + 1, 301 .end = IRQ_UART0_RX,
297 .flags = IORESOURCE_IRQ, 302 .flags = IORESOURCE_IRQ,
298 }, 303 },
299 { 304 {
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index fbee77fa9211..b597d4e50d58 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -151,8 +151,13 @@ static struct resource bfin_uart0_resources[] = {
151 .flags = IORESOURCE_MEM, 151 .flags = IORESOURCE_MEM,
152 }, 152 },
153 { 153 {
154 .start = IRQ_UART0_TX,
155 .end = IRQ_UART0_TX,
156 .flags = IORESOURCE_IRQ,
157 },
158 {
154 .start = IRQ_UART0_RX, 159 .start = IRQ_UART0_RX,
155 .end = IRQ_UART0_RX + 1, 160 .end = IRQ_UART0_RX,
156 .flags = IORESOURCE_IRQ, 161 .flags = IORESOURCE_IRQ,
157 }, 162 },
158 { 163 {
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 964a8e5f79b4..2afd02e14bd1 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -298,8 +298,13 @@ static struct resource bfin_uart0_resources[] = {
298 .flags = IORESOURCE_MEM, 298 .flags = IORESOURCE_MEM,
299 }, 299 },
300 { 300 {
301 .start = IRQ_UART0_TX,
302 .end = IRQ_UART0_TX,
303 .flags = IORESOURCE_IRQ,
304 },
305 {
301 .start = IRQ_UART0_RX, 306 .start = IRQ_UART0_RX,
302 .end = IRQ_UART0_RX + 1, 307 .end = IRQ_UART0_RX,
303 .flags = IORESOURCE_IRQ, 308 .flags = IORESOURCE_IRQ,
304 }, 309 },
305 { 310 {
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index 1471c51ea697..604a430038e1 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/device.h> 10#include <linux/device.h>
11#include <linux/export.h>
11#include <linux/etherdevice.h> 12#include <linux/etherdevice.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
13#include <linux/mtd/mtd.h> 14#include <linux/mtd/mtd.h>
@@ -305,8 +306,13 @@ static struct resource bfin_uart0_resources[] = {
305 .flags = IORESOURCE_MEM, 306 .flags = IORESOURCE_MEM,
306 }, 307 },
307 { 308 {
309 .start = IRQ_UART0_TX,
310 .end = IRQ_UART0_TX,
311 .flags = IORESOURCE_IRQ,
312 },
313 {
308 .start = IRQ_UART0_RX, 314 .start = IRQ_UART0_RX,
309 .end = IRQ_UART0_RX+1, 315 .end = IRQ_UART0_RX,
310 .flags = IORESOURCE_IRQ, 316 .flags = IORESOURCE_IRQ,
311 }, 317 },
312 { 318 {
@@ -366,8 +372,13 @@ static struct resource bfin_uart1_resources[] = {
366 .flags = IORESOURCE_MEM, 372 .flags = IORESOURCE_MEM,
367 }, 373 },
368 { 374 {
375 .start = IRQ_UART1_TX,
376 .end = IRQ_UART1_TX,
377 .flags = IORESOURCE_IRQ,
378 },
379 {
369 .start = IRQ_UART1_RX, 380 .start = IRQ_UART1_RX,
370 .end = IRQ_UART1_RX+1, 381 .end = IRQ_UART1_RX,
371 .flags = IORESOURCE_IRQ, 382 .flags = IORESOURCE_IRQ,
372 }, 383 },
373 { 384 {
@@ -569,7 +580,6 @@ static struct platform_device bfin_sport1_uart_device = {
569 580
570#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 581#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
571#include <linux/bfin_mac.h> 582#include <linux/bfin_mac.h>
572#include <linux/export.h>
573static const unsigned short bfin_mac_peripherals[] = P_MII0; 583static const unsigned short bfin_mac_peripherals[] = P_MII0;
574 584
575static struct bfin_phydev_platform_data bfin_phydev_data[] = { 585static struct bfin_phydev_platform_data bfin_phydev_data[] = {
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index 47cf37de33ba..d916b46a44fe 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/device.h> 10#include <linux/device.h>
11#include <linux/etherdevice.h> 11#include <linux/etherdevice.h>
12#include <linux/export.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
13#include <linux/mtd/mtd.h> 14#include <linux/mtd/mtd.h>
14#include <linux/mtd/partitions.h> 15#include <linux/mtd/partitions.h>
@@ -306,8 +307,13 @@ static struct resource bfin_uart0_resources[] = {
306 .flags = IORESOURCE_MEM, 307 .flags = IORESOURCE_MEM,
307 }, 308 },
308 { 309 {
310 .start = IRQ_UART0_TX,
311 .end = IRQ_UART0_TX,
312 .flags = IORESOURCE_IRQ,
313 },
314 {
309 .start = IRQ_UART0_RX, 315 .start = IRQ_UART0_RX,
310 .end = IRQ_UART0_RX+1, 316 .end = IRQ_UART0_RX,
311 .flags = IORESOURCE_IRQ, 317 .flags = IORESOURCE_IRQ,
312 }, 318 },
313 { 319 {
@@ -349,8 +355,13 @@ static struct resource bfin_uart1_resources[] = {
349 .flags = IORESOURCE_MEM, 355 .flags = IORESOURCE_MEM,
350 }, 356 },
351 { 357 {
358 .start = IRQ_UART1_TX,
359 .end = IRQ_UART1_TX,
360 .flags = IORESOURCE_IRQ,
361 },
362 {
352 .start = IRQ_UART1_RX, 363 .start = IRQ_UART1_RX,
353 .end = IRQ_UART1_RX+1, 364 .end = IRQ_UART1_RX,
354 .flags = IORESOURCE_IRQ, 365 .flags = IORESOURCE_IRQ,
355 }, 366 },
356 { 367 {
@@ -534,7 +545,6 @@ static struct platform_device bfin_sport1_uart_device = {
534 545
535#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 546#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
536#include <linux/bfin_mac.h> 547#include <linux/bfin_mac.h>
537#include <linux/export.h>
538static const unsigned short bfin_mac_peripherals[] = P_MII0; 548static const unsigned short bfin_mac_peripherals[] = P_MII0;
539 549
540static struct bfin_phydev_platform_data bfin_phydev_data[] = { 550static struct bfin_phydev_platform_data bfin_phydev_data[] = {
diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c
index 33e69e427e98..5f307228be63 100644
--- a/arch/blackfin/mach-bf537/boards/dnp5370.c
+++ b/arch/blackfin/mach-bf537/boards/dnp5370.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/export.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
17#include <linux/io.h> 18#include <linux/io.h>
@@ -49,7 +50,6 @@ static struct platform_device rtc_device = {
49 50
50#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 51#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
51#include <linux/bfin_mac.h> 52#include <linux/bfin_mac.h>
52#include <linux/export.h>
53static const unsigned short bfin_mac_peripherals[] = P_RMII0; 53static const unsigned short bfin_mac_peripherals[] = P_RMII0;
54 54
55static struct bfin_phydev_platform_data bfin_phydev_data[] = { 55static struct bfin_phydev_platform_data bfin_phydev_data[] = {
@@ -237,8 +237,13 @@ static struct resource bfin_uart0_resources[] = {
237 .flags = IORESOURCE_MEM, 237 .flags = IORESOURCE_MEM,
238 }, 238 },
239 { 239 {
240 .start = IRQ_UART0_TX,
241 .end = IRQ_UART0_TX,
242 .flags = IORESOURCE_IRQ,
243 },
244 {
240 .start = IRQ_UART0_RX, 245 .start = IRQ_UART0_RX,
241 .end = IRQ_UART0_RX+1, 246 .end = IRQ_UART0_RX,
242 .flags = IORESOURCE_IRQ, 247 .flags = IORESOURCE_IRQ,
243 }, 248 },
244 { 249 {
@@ -281,8 +286,13 @@ static struct resource bfin_uart1_resources[] = {
281 .flags = IORESOURCE_MEM, 286 .flags = IORESOURCE_MEM,
282 }, 287 },
283 { 288 {
289 .start = IRQ_UART1_TX,
290 .end = IRQ_UART1_TX,
291 .flags = IORESOURCE_IRQ,
292 },
293 {
284 .start = IRQ_UART1_RX, 294 .start = IRQ_UART1_RX,
285 .end = IRQ_UART1_RX+1, 295 .end = IRQ_UART1_RX,
286 .flags = IORESOURCE_IRQ, 296 .flags = IORESOURCE_IRQ,
287 }, 297 },
288 { 298 {
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index c62f9dccd9f7..3901dd093b90 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -240,8 +240,13 @@ static struct resource bfin_uart0_resources[] = {
240 .flags = IORESOURCE_MEM, 240 .flags = IORESOURCE_MEM,
241 }, 241 },
242 { 242 {
243 .start = IRQ_UART0_TX,
244 .end = IRQ_UART0_TX,
245 .flags = IORESOURCE_IRQ,
246 },
247 {
243 .start = IRQ_UART0_RX, 248 .start = IRQ_UART0_RX,
244 .end = IRQ_UART0_RX+1, 249 .end = IRQ_UART0_RX,
245 .flags = IORESOURCE_IRQ, 250 .flags = IORESOURCE_IRQ,
246 }, 251 },
247 { 252 {
@@ -283,8 +288,13 @@ static struct resource bfin_uart1_resources[] = {
283 .flags = IORESOURCE_MEM, 288 .flags = IORESOURCE_MEM,
284 }, 289 },
285 { 290 {
291 .start = IRQ_UART1_TX,
292 .end = IRQ_UART1_TX,
293 .flags = IORESOURCE_IRQ,
294 },
295 {
286 .start = IRQ_UART1_RX, 296 .start = IRQ_UART1_RX,
287 .end = IRQ_UART1_RX+1, 297 .end = IRQ_UART1_RX,
288 .flags = IORESOURCE_IRQ, 298 .flags = IORESOURCE_IRQ,
289 }, 299 },
290 { 300 {
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 3099e91114fc..aebd31c845f0 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/etherdevice.h> 10#include <linux/etherdevice.h>
11#include <linux/export.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12#include <linux/mtd/mtd.h> 13#include <linux/mtd/mtd.h>
13#include <linux/mtd/partitions.h> 14#include <linux/mtd/partitions.h>
@@ -309,8 +310,13 @@ static struct resource bfin_uart0_resources[] = {
309 .flags = IORESOURCE_MEM, 310 .flags = IORESOURCE_MEM,
310 }, 311 },
311 { 312 {
313 .start = IRQ_UART0_TX,
314 .end = IRQ_UART0_TX,
315 .flags = IORESOURCE_IRQ,
316 },
317 {
312 .start = IRQ_UART0_RX, 318 .start = IRQ_UART0_RX,
313 .end = IRQ_UART0_RX+1, 319 .end = IRQ_UART0_RX,
314 .flags = IORESOURCE_IRQ, 320 .flags = IORESOURCE_IRQ,
315 }, 321 },
316 { 322 {
@@ -352,8 +358,13 @@ static struct resource bfin_uart1_resources[] = {
352 .flags = IORESOURCE_MEM, 358 .flags = IORESOURCE_MEM,
353 }, 359 },
354 { 360 {
361 .start = IRQ_UART1_TX,
362 .end = IRQ_UART1_TX,
363 .flags = IORESOURCE_IRQ,
364 },
365 {
355 .start = IRQ_UART1_RX, 366 .start = IRQ_UART1_RX,
356 .end = IRQ_UART1_RX+1, 367 .end = IRQ_UART1_RX,
357 .flags = IORESOURCE_IRQ, 368 .flags = IORESOURCE_IRQ,
358 }, 369 },
359 { 370 {
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 27f955db9976..7fbb0bbf8676 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/export.h>
10#include <linux/kernel.h> 11#include <linux/kernel.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12#include <linux/io.h> 13#include <linux/io.h>
@@ -1566,8 +1567,13 @@ static struct resource bfin_uart0_resources[] = {
1566 .flags = IORESOURCE_MEM, 1567 .flags = IORESOURCE_MEM,
1567 }, 1568 },
1568 { 1569 {
1570 .start = IRQ_UART0_TX,
1571 .end = IRQ_UART0_TX,
1572 .flags = IORESOURCE_IRQ,
1573 },
1574 {
1569 .start = IRQ_UART0_RX, 1575 .start = IRQ_UART0_RX,
1570 .end = IRQ_UART0_RX+1, 1576 .end = IRQ_UART0_RX,
1571 .flags = IORESOURCE_IRQ, 1577 .flags = IORESOURCE_IRQ,
1572 }, 1578 },
1573 { 1579 {
@@ -1621,8 +1627,13 @@ static struct resource bfin_uart1_resources[] = {
1621 .flags = IORESOURCE_MEM, 1627 .flags = IORESOURCE_MEM,
1622 }, 1628 },
1623 { 1629 {
1630 .start = IRQ_UART1_TX,
1631 .end = IRQ_UART1_TX,
1632 .flags = IORESOURCE_IRQ,
1633 },
1634 {
1624 .start = IRQ_UART1_RX, 1635 .start = IRQ_UART1_RX,
1625 .end = IRQ_UART1_RX+1, 1636 .end = IRQ_UART1_RX,
1626 .flags = IORESOURCE_IRQ, 1637 .flags = IORESOURCE_IRQ,
1627 }, 1638 },
1628 { 1639 {
@@ -1992,7 +2003,6 @@ static struct adp8870_backlight_platform_data adp8870_pdata = {
1992 2003
1993#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE) 2004#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE)
1994#include <linux/i2c/adp8860.h> 2005#include <linux/i2c/adp8860.h>
1995#include <linux/export.h>
1996static struct led_info adp8860_leds[] = { 2006static struct led_info adp8860_leds[] = {
1997 { 2007 {
1998 .name = "adp8860-led7", 2008 .name = "adp8860-led7",
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 841803038d6f..6917ce2fa55e 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/device.h> 10#include <linux/device.h>
11#include <linux/etherdevice.h> 11#include <linux/etherdevice.h>
12#include <linux/export.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
13#include <linux/mtd/mtd.h> 14#include <linux/mtd/mtd.h>
14#include <linux/mtd/partitions.h> 15#include <linux/mtd/partitions.h>
@@ -306,8 +307,13 @@ static struct resource bfin_uart0_resources[] = {
306 .flags = IORESOURCE_MEM, 307 .flags = IORESOURCE_MEM,
307 }, 308 },
308 { 309 {
310 .start = IRQ_UART0_TX,
311 .end = IRQ_UART0_TX,
312 .flags = IORESOURCE_IRQ,
313 },
314 {
309 .start = IRQ_UART0_RX, 315 .start = IRQ_UART0_RX,
310 .end = IRQ_UART0_RX+1, 316 .end = IRQ_UART0_RX,
311 .flags = IORESOURCE_IRQ, 317 .flags = IORESOURCE_IRQ,
312 }, 318 },
313 { 319 {
@@ -349,8 +355,13 @@ static struct resource bfin_uart1_resources[] = {
349 .flags = IORESOURCE_MEM, 355 .flags = IORESOURCE_MEM,
350 }, 356 },
351 { 357 {
358 .start = IRQ_UART1_TX,
359 .end = IRQ_UART1_TX,
360 .flags = IORESOURCE_IRQ,
361 },
362 {
352 .start = IRQ_UART1_RX, 363 .start = IRQ_UART1_RX,
353 .end = IRQ_UART1_RX+1, 364 .end = IRQ_UART1_RX,
354 .flags = IORESOURCE_IRQ, 365 .flags = IORESOURCE_IRQ,
355 }, 366 },
356 { 367 {
@@ -536,7 +547,6 @@ static struct platform_device bfin_sport1_uart_device = {
536 547
537#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 548#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
538#include <linux/bfin_mac.h> 549#include <linux/bfin_mac.h>
539#include <linux/export.h>
540static const unsigned short bfin_mac_peripherals[] = P_MII0; 550static const unsigned short bfin_mac_peripherals[] = P_MII0;
541 551
542static struct bfin_phydev_platform_data bfin_phydev_data[] = { 552static struct bfin_phydev_platform_data bfin_phydev_data[] = {
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index 629f3c333415..8356eb599f19 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -49,8 +49,13 @@ static struct resource bfin_uart0_resources[] = {
49 .flags = IORESOURCE_MEM, 49 .flags = IORESOURCE_MEM,
50 }, 50 },
51 { 51 {
52 .start = IRQ_UART0_TX,
53 .end = IRQ_UART0_TX,
54 .flags = IORESOURCE_IRQ,
55 },
56 {
52 .start = IRQ_UART0_RX, 57 .start = IRQ_UART0_RX,
53 .end = IRQ_UART0_RX+1, 58 .end = IRQ_UART0_RX,
54 .flags = IORESOURCE_IRQ, 59 .flags = IORESOURCE_IRQ,
55 }, 60 },
56 { 61 {
@@ -104,8 +109,13 @@ static struct resource bfin_uart1_resources[] = {
104 .flags = IORESOURCE_MEM, 109 .flags = IORESOURCE_MEM,
105 }, 110 },
106 { 111 {
112 .start = IRQ_UART1_TX,
113 .end = IRQ_UART1_TX,
114 .flags = IORESOURCE_IRQ,
115 },
116 {
107 .start = IRQ_UART1_RX, 117 .start = IRQ_UART1_RX,
108 .end = IRQ_UART1_RX+1, 118 .end = IRQ_UART1_RX,
109 .flags = IORESOURCE_IRQ, 119 .flags = IORESOURCE_IRQ,
110 }, 120 },
111 { 121 {
@@ -147,8 +157,13 @@ static struct resource bfin_uart2_resources[] = {
147 .flags = IORESOURCE_MEM, 157 .flags = IORESOURCE_MEM,
148 }, 158 },
149 { 159 {
160 .start = IRQ_UART2_TX,
161 .end = IRQ_UART2_TX,
162 .flags = IORESOURCE_IRQ,
163 },
164 {
150 .start = IRQ_UART2_RX, 165 .start = IRQ_UART2_RX,
151 .end = IRQ_UART2_RX+1, 166 .end = IRQ_UART2_RX,
152 .flags = IORESOURCE_IRQ, 167 .flags = IORESOURCE_IRQ,
153 }, 168 },
154 { 169 {
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index 212b9e0a08c8..0350eacec21b 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -135,8 +135,13 @@ static struct resource bfin_uart0_resources[] = {
135 .flags = IORESOURCE_MEM, 135 .flags = IORESOURCE_MEM,
136 }, 136 },
137 { 137 {
138 .start = IRQ_UART0_TX,
139 .end = IRQ_UART0_TX,
140 .flags = IORESOURCE_IRQ,
141 },
142 {
138 .start = IRQ_UART0_RX, 143 .start = IRQ_UART0_RX,
139 .end = IRQ_UART0_RX+1, 144 .end = IRQ_UART0_RX,
140 .flags = IORESOURCE_IRQ, 145 .flags = IORESOURCE_IRQ,
141 }, 146 },
142 { 147 {
@@ -178,8 +183,13 @@ static struct resource bfin_uart1_resources[] = {
178 .flags = IORESOURCE_MEM, 183 .flags = IORESOURCE_MEM,
179 }, 184 },
180 { 185 {
186 .start = IRQ_UART1_TX,
187 .end = IRQ_UART1_TX,
188 .flags = IORESOURCE_IRQ,
189 },
190 {
181 .start = IRQ_UART1_RX, 191 .start = IRQ_UART1_RX,
182 .end = IRQ_UART1_RX+1, 192 .end = IRQ_UART1_RX,
183 .flags = IORESOURCE_IRQ, 193 .flags = IORESOURCE_IRQ,
184 }, 194 },
185 { 195 {
@@ -237,8 +247,13 @@ static struct resource bfin_uart2_resources[] = {
237 .flags = IORESOURCE_MEM, 247 .flags = IORESOURCE_MEM,
238 }, 248 },
239 { 249 {
250 .start = IRQ_UART2_TX,
251 .end = IRQ_UART2_TX,
252 .flags = IORESOURCE_IRQ,
253 },
254 {
240 .start = IRQ_UART2_RX, 255 .start = IRQ_UART2_RX,
241 .end = IRQ_UART2_RX+1, 256 .end = IRQ_UART2_RX,
242 .flags = IORESOURCE_IRQ, 257 .flags = IORESOURCE_IRQ,
243 }, 258 },
244 { 259 {
@@ -280,8 +295,13 @@ static struct resource bfin_uart3_resources[] = {
280 .flags = IORESOURCE_MEM, 295 .flags = IORESOURCE_MEM,
281 }, 296 },
282 { 297 {
298 .start = IRQ_UART3_TX,
299 .end = IRQ_UART3_TX,
300 .flags = IORESOURCE_IRQ,
301 },
302 {
283 .start = IRQ_UART3_RX, 303 .start = IRQ_UART3_RX,
284 .end = IRQ_UART3_RX+1, 304 .end = IRQ_UART3_RX,
285 .flags = IORESOURCE_IRQ, 305 .flags = IORESOURCE_IRQ,
286 }, 306 },
287 { 307 {
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index cd9cbb68de69..bb868ac0fe2d 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -241,8 +241,13 @@ static struct resource bfin_uart0_resources[] = {
241 .flags = IORESOURCE_MEM, 241 .flags = IORESOURCE_MEM,
242 }, 242 },
243 { 243 {
244 .start = IRQ_UART0_TX,
245 .end = IRQ_UART0_TX,
246 .flags = IORESOURCE_IRQ,
247 },
248 {
244 .start = IRQ_UART0_RX, 249 .start = IRQ_UART0_RX,
245 .end = IRQ_UART0_RX+1, 250 .end = IRQ_UART0_RX,
246 .flags = IORESOURCE_IRQ, 251 .flags = IORESOURCE_IRQ,
247 }, 252 },
248 { 253 {
@@ -284,8 +289,13 @@ static struct resource bfin_uart1_resources[] = {
284 .flags = IORESOURCE_MEM, 289 .flags = IORESOURCE_MEM,
285 }, 290 },
286 { 291 {
292 .start = IRQ_UART1_TX,
293 .end = IRQ_UART1_TX,
294 .flags = IORESOURCE_IRQ,
295 },
296 {
287 .start = IRQ_UART1_RX, 297 .start = IRQ_UART1_RX,
288 .end = IRQ_UART1_RX+1, 298 .end = IRQ_UART1_RX,
289 .flags = IORESOURCE_IRQ, 299 .flags = IORESOURCE_IRQ,
290 }, 300 },
291 { 301 {
@@ -343,8 +353,13 @@ static struct resource bfin_uart2_resources[] = {
343 .flags = IORESOURCE_MEM, 353 .flags = IORESOURCE_MEM,
344 }, 354 },
345 { 355 {
356 .start = IRQ_UART2_TX,
357 .end = IRQ_UART2_TX,
358 .flags = IORESOURCE_IRQ,
359 },
360 {
346 .start = IRQ_UART2_RX, 361 .start = IRQ_UART2_RX,
347 .end = IRQ_UART2_RX+1, 362 .end = IRQ_UART2_RX,
348 .flags = IORESOURCE_IRQ, 363 .flags = IORESOURCE_IRQ,
349 }, 364 },
350 { 365 {
@@ -386,8 +401,13 @@ static struct resource bfin_uart3_resources[] = {
386 .flags = IORESOURCE_MEM, 401 .flags = IORESOURCE_MEM,
387 }, 402 },
388 { 403 {
404 .start = IRQ_UART3_TX,
405 .end = IRQ_UART3_TX,
406 .flags = IORESOURCE_IRQ,
407 },
408 {
389 .start = IRQ_UART3_RX, 409 .start = IRQ_UART3_RX,
390 .end = IRQ_UART3_RX+1, 410 .end = IRQ_UART3_RX,
391 .flags = IORESOURCE_IRQ, 411 .flags = IORESOURCE_IRQ,
392 }, 412 },
393 { 413 {
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 972e1347c6bc..b1b7339b6ba7 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -203,8 +203,13 @@ static struct resource bfin_uart0_resources[] = {
203 .flags = IORESOURCE_MEM, 203 .flags = IORESOURCE_MEM,
204 }, 204 },
205 { 205 {
206 .start = IRQ_UART_TX,
207 .end = IRQ_UART_TX,
208 .flags = IORESOURCE_IRQ,
209 },
210 {
206 .start = IRQ_UART_RX, 211 .start = IRQ_UART_RX,
207 .end = IRQ_UART_RX + 1, 212 .end = IRQ_UART_RX,
208 .flags = IORESOURCE_IRQ, 213 .flags = IORESOURCE_IRQ,
209 }, 214 },
210 { 215 {
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index c1b72f2d6354..c017cf07ed4e 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -277,8 +277,13 @@ static struct resource bfin_uart0_resources[] = {
277 .flags = IORESOURCE_MEM, 277 .flags = IORESOURCE_MEM,
278 }, 278 },
279 { 279 {
280 .start = IRQ_UART_TX,
281 .end = IRQ_UART_TX,
282 .flags = IORESOURCE_IRQ,
283 },
284 {
280 .start = IRQ_UART_RX, 285 .start = IRQ_UART_RX,
281 .end = IRQ_UART_RX+1, 286 .end = IRQ_UART_RX,
282 .flags = IORESOURCE_IRQ, 287 .flags = IORESOURCE_IRQ,
283 }, 288 },
284 { 289 {
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 9490dc800ca5..27f22ed381d9 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -172,8 +172,13 @@ static struct resource bfin_uart0_resources[] = {
172 .flags = IORESOURCE_MEM, 172 .flags = IORESOURCE_MEM,
173 }, 173 },
174 { 174 {
175 .start = IRQ_UART_TX,
176 .end = IRQ_UART_TX,
177 .flags = IORESOURCE_IRQ,
178 },
179 {
175 .start = IRQ_UART_RX, 180 .start = IRQ_UART_RX,
176 .end = IRQ_UART_RX+1, 181 .end = IRQ_UART_RX,
177 .flags = IORESOURCE_IRQ, 182 .flags = IORESOURCE_IRQ,
178 }, 183 },
179 { 184 {
diff --git a/arch/blackfin/mach-bf561/boards/tepla.c b/arch/blackfin/mach-bf561/boards/tepla.c
index bb056e60f6ed..1a57bc986aad 100644
--- a/arch/blackfin/mach-bf561/boards/tepla.c
+++ b/arch/blackfin/mach-bf561/boards/tepla.c
@@ -51,8 +51,13 @@ static struct resource bfin_uart0_resources[] = {
51 .flags = IORESOURCE_MEM, 51 .flags = IORESOURCE_MEM,
52 }, 52 },
53 { 53 {
54 .start = IRQ_UART_TX,
55 .end = IRQ_UART_TX,
56 .flags = IORESOURCE_IRQ,
57 },
58 {
54 .start = IRQ_UART_RX, 59 .start = IRQ_UART_RX,
55 .end = IRQ_UART_RX+1, 60 .end = IRQ_UART_RX,
56 .flags = IORESOURCE_IRQ, 61 .flags = IORESOURCE_IRQ,
57 }, 62 },
58 { 63 {
diff --git a/arch/cris/arch-v10/drivers/Kconfig b/arch/cris/arch-v10/drivers/Kconfig
index 32d90867a984..5f2cdb3e428c 100644
--- a/arch/cris/arch-v10/drivers/Kconfig
+++ b/arch/cris/arch-v10/drivers/Kconfig
@@ -3,7 +3,7 @@ if ETRAX_ARCH_V10
3config ETRAX_ETHERNET 3config ETRAX_ETHERNET
4 bool "Ethernet support" 4 bool "Ethernet support"
5 depends on ETRAX_ARCH_V10 5 depends on ETRAX_ARCH_V10
6 select NET_ETHERNET 6 select ETHERNET
7 select NET_CORE 7 select NET_CORE
8 select MII 8 select MII
9 help 9 help
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index e47e9c3401b0..de43aadcdbc4 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -3,7 +3,7 @@ if ETRAX_ARCH_V32
3config ETRAX_ETHERNET 3config ETRAX_ETHERNET
4 bool "Ethernet support" 4 bool "Ethernet support"
5 depends on ETRAX_ARCH_V32 5 depends on ETRAX_ARCH_V32
6 select NET_ETHERNET 6 select ETHERNET
7 select NET_CORE 7 select NET_CORE
8 select MII 8 select MII
9 help 9 help
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 6c28582fb98f..361d54019bb0 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -4,8 +4,8 @@ config M68K
4 select HAVE_IDE 4 select HAVE_IDE
5 select HAVE_AOUT if MMU 5 select HAVE_AOUT if MMU
6 select GENERIC_ATOMIC64 if MMU 6 select GENERIC_ATOMIC64 if MMU
7 select HAVE_GENERIC_HARDIRQS if !MMU 7 select HAVE_GENERIC_HARDIRQS
8 select GENERIC_IRQ_SHOW if !MMU 8 select GENERIC_IRQ_SHOW
9 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS 9 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
10 10
11config RWSEM_GENERIC_SPINLOCK 11config RWSEM_GENERIC_SPINLOCK
diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus
index 8294f0c1785e..3adb499584fb 100644
--- a/arch/m68k/Kconfig.bus
+++ b/arch/m68k/Kconfig.bus
@@ -2,6 +2,15 @@ if MMU
2 2
3comment "Bus Support" 3comment "Bus Support"
4 4
5config DIO
6 bool "DIO bus support"
7 depends on HP300
8 default y
9 help
10 Say Y here to enable support for the "DIO" expansion bus used in
11 HP300 machines. If you are using such a system you almost certainly
12 want this.
13
5config NUBUS 14config NUBUS
6 bool 15 bool
7 depends on MAC 16 depends on MAC
diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices
index d214034be6a6..6033f5d4e67e 100644
--- a/arch/m68k/Kconfig.devices
+++ b/arch/m68k/Kconfig.devices
@@ -24,6 +24,37 @@ config PROC_HARDWARE
24 including the model, CPU, MMU, clock speed, BogoMIPS rating, 24 including the model, CPU, MMU, clock speed, BogoMIPS rating,
25 and memory size. 25 and memory size.
26 26
27config NATFEAT
28 bool "ARAnyM emulator support"
29 depends on ATARI
30 help
31 This option enables support for ARAnyM native features, such as
32 access to a disk image as /dev/hda.
33
34config NFBLOCK
35 tristate "NatFeat block device support"
36 depends on BLOCK && NATFEAT
37 help
38 Say Y to include support for the ARAnyM NatFeat block device
39 which allows direct access to the hard drives without using
40 the hardware emulation.
41
42config NFCON
43 tristate "NatFeat console driver"
44 depends on NATFEAT
45 help
46 Say Y to include support for the ARAnyM NatFeat console driver
47 which allows the console output to be redirected to the stderr
48 output of ARAnyM.
49
50config NFETH
51 tristate "NatFeat Ethernet support"
52 depends on ETHERNET && NATFEAT
53 help
54 Say Y to include support for the ARAnyM NatFeat network device
55 which will emulate a regular ethernet device while presenting an
56 ethertap device to the host system.
57
27endmenu 58endmenu
28 59
29menu "Character devices" 60menu "Character devices"
diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c
index c5b5212cc3f9..47b5f90002ab 100644
--- a/arch/m68k/amiga/amiints.c
+++ b/arch/m68k/amiga/amiints.c
@@ -1,43 +1,15 @@
1/* 1/*
2 * linux/arch/m68k/amiga/amiints.c -- Amiga Linux interrupt handling code 2 * Amiga Linux interrupt handling code
3 * 3 *
4 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file COPYING in the main directory of this archive 5 * License. See the file COPYING in the main directory of this archive
6 * for more details. 6 * for more details.
7 *
8 * 11/07/96: rewritten interrupt handling, irq lists are exists now only for
9 * this sources where it makes sense (VERTB/PORTS/EXTER) and you must
10 * be careful that dev_id for this sources is unique since this the
11 * only possibility to distinguish between different handlers for
12 * free_irq. irq lists also have different irq flags:
13 * - IRQ_FLG_FAST: handler is inserted at top of list (after other
14 * fast handlers)
15 * - IRQ_FLG_SLOW: handler is inserted at bottom of list and before
16 * they're executed irq level is set to the previous
17 * one, but handlers don't need to be reentrant, if
18 * reentrance occurred, slow handlers will be just
19 * called again.
20 * The whole interrupt handling for CIAs is moved to cia.c
21 * /Roman Zippel
22 *
23 * 07/08/99: rewamp of the interrupt handling - we now have two types of
24 * interrupts, normal and fast handlers, fast handlers being
25 * marked with IRQF_DISABLED and runs with all other interrupts
26 * disabled. Normal interrupts disable their own source but
27 * run with all other interrupt sources enabled.
28 * PORTS and EXTER interrupts are always shared even if the
29 * drivers do not explicitly mark this when calling
30 * request_irq which they really should do.
31 * This is similar to the way interrupts are handled on all
32 * other architectures and makes a ton of sense besides
33 * having the advantage of making it easier to share
34 * drivers.
35 * /Jes
36 */ 7 */
37 8
38#include <linux/init.h> 9#include <linux/init.h>
39#include <linux/interrupt.h> 10#include <linux/interrupt.h>
40#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/irq.h>
41 13
42#include <asm/irq.h> 14#include <asm/irq.h>
43#include <asm/traps.h> 15#include <asm/traps.h>
@@ -45,56 +17,6 @@
45#include <asm/amigaints.h> 17#include <asm/amigaints.h>
46#include <asm/amipcmcia.h> 18#include <asm/amipcmcia.h>
47 19
48static void amiga_enable_irq(unsigned int irq);
49static void amiga_disable_irq(unsigned int irq);
50static irqreturn_t ami_int1(int irq, void *dev_id);
51static irqreturn_t ami_int3(int irq, void *dev_id);
52static irqreturn_t ami_int4(int irq, void *dev_id);
53static irqreturn_t ami_int5(int irq, void *dev_id);
54
55static struct irq_controller amiga_irq_controller = {
56 .name = "amiga",
57 .lock = __SPIN_LOCK_UNLOCKED(amiga_irq_controller.lock),
58 .enable = amiga_enable_irq,
59 .disable = amiga_disable_irq,
60};
61
62/*
63 * void amiga_init_IRQ(void)
64 *
65 * Parameters: None
66 *
67 * Returns: Nothing
68 *
69 * This function should be called during kernel startup to initialize
70 * the amiga IRQ handling routines.
71 */
72
73void __init amiga_init_IRQ(void)
74{
75 if (request_irq(IRQ_AUTO_1, ami_int1, 0, "int1", NULL))
76 pr_err("Couldn't register int%d\n", 1);
77 if (request_irq(IRQ_AUTO_3, ami_int3, 0, "int3", NULL))
78 pr_err("Couldn't register int%d\n", 3);
79 if (request_irq(IRQ_AUTO_4, ami_int4, 0, "int4", NULL))
80 pr_err("Couldn't register int%d\n", 4);
81 if (request_irq(IRQ_AUTO_5, ami_int5, 0, "int5", NULL))
82 pr_err("Couldn't register int%d\n", 5);
83
84 m68k_setup_irq_controller(&amiga_irq_controller, IRQ_USER, AMI_STD_IRQS);
85
86 /* turn off PCMCIA interrupts */
87 if (AMIGAHW_PRESENT(PCMCIA))
88 gayle.inten = GAYLE_IRQ_IDE;
89
90 /* turn off all interrupts and enable the master interrupt bit */
91 amiga_custom.intena = 0x7fff;
92 amiga_custom.intreq = 0x7fff;
93 amiga_custom.intena = IF_SETCLR | IF_INTEN;
94
95 cia_init_IRQ(&ciaa_base);
96 cia_init_IRQ(&ciab_base);
97}
98 20
99/* 21/*
100 * Enable/disable a particular machine specific interrupt source. 22 * Enable/disable a particular machine specific interrupt source.
@@ -103,112 +25,150 @@ void __init amiga_init_IRQ(void)
103 * internal data, that may not be changed by the interrupt at the same time. 25 * internal data, that may not be changed by the interrupt at the same time.
104 */ 26 */
105 27
106static void amiga_enable_irq(unsigned int irq) 28static void amiga_irq_enable(struct irq_data *data)
107{ 29{
108 amiga_custom.intena = IF_SETCLR | (1 << (irq - IRQ_USER)); 30 amiga_custom.intena = IF_SETCLR | (1 << (data->irq - IRQ_USER));
109} 31}
110 32
111static void amiga_disable_irq(unsigned int irq) 33static void amiga_irq_disable(struct irq_data *data)
112{ 34{
113 amiga_custom.intena = 1 << (irq - IRQ_USER); 35 amiga_custom.intena = 1 << (data->irq - IRQ_USER);
114} 36}
115 37
38static struct irq_chip amiga_irq_chip = {
39 .name = "amiga",
40 .irq_enable = amiga_irq_enable,
41 .irq_disable = amiga_irq_disable,
42};
43
44
116/* 45/*
117 * The builtin Amiga hardware interrupt handlers. 46 * The builtin Amiga hardware interrupt handlers.
118 */ 47 */
119 48
120static irqreturn_t ami_int1(int irq, void *dev_id) 49static void ami_int1(unsigned int irq, struct irq_desc *desc)
121{ 50{
122 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; 51 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
123 52
124 /* if serial transmit buffer empty, interrupt */ 53 /* if serial transmit buffer empty, interrupt */
125 if (ints & IF_TBE) { 54 if (ints & IF_TBE) {
126 amiga_custom.intreq = IF_TBE; 55 amiga_custom.intreq = IF_TBE;
127 m68k_handle_int(IRQ_AMIGA_TBE); 56 generic_handle_irq(IRQ_AMIGA_TBE);
128 } 57 }
129 58
130 /* if floppy disk transfer complete, interrupt */ 59 /* if floppy disk transfer complete, interrupt */
131 if (ints & IF_DSKBLK) { 60 if (ints & IF_DSKBLK) {
132 amiga_custom.intreq = IF_DSKBLK; 61 amiga_custom.intreq = IF_DSKBLK;
133 m68k_handle_int(IRQ_AMIGA_DSKBLK); 62 generic_handle_irq(IRQ_AMIGA_DSKBLK);
134 } 63 }
135 64
136 /* if software interrupt set, interrupt */ 65 /* if software interrupt set, interrupt */
137 if (ints & IF_SOFT) { 66 if (ints & IF_SOFT) {
138 amiga_custom.intreq = IF_SOFT; 67 amiga_custom.intreq = IF_SOFT;
139 m68k_handle_int(IRQ_AMIGA_SOFT); 68 generic_handle_irq(IRQ_AMIGA_SOFT);
140 } 69 }
141 return IRQ_HANDLED;
142} 70}
143 71
144static irqreturn_t ami_int3(int irq, void *dev_id) 72static void ami_int3(unsigned int irq, struct irq_desc *desc)
145{ 73{
146 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; 74 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
147 75
148 /* if a blitter interrupt */ 76 /* if a blitter interrupt */
149 if (ints & IF_BLIT) { 77 if (ints & IF_BLIT) {
150 amiga_custom.intreq = IF_BLIT; 78 amiga_custom.intreq = IF_BLIT;
151 m68k_handle_int(IRQ_AMIGA_BLIT); 79 generic_handle_irq(IRQ_AMIGA_BLIT);
152 } 80 }
153 81
154 /* if a copper interrupt */ 82 /* if a copper interrupt */
155 if (ints & IF_COPER) { 83 if (ints & IF_COPER) {
156 amiga_custom.intreq = IF_COPER; 84 amiga_custom.intreq = IF_COPER;
157 m68k_handle_int(IRQ_AMIGA_COPPER); 85 generic_handle_irq(IRQ_AMIGA_COPPER);
158 } 86 }
159 87
160 /* if a vertical blank interrupt */ 88 /* if a vertical blank interrupt */
161 if (ints & IF_VERTB) { 89 if (ints & IF_VERTB) {
162 amiga_custom.intreq = IF_VERTB; 90 amiga_custom.intreq = IF_VERTB;
163 m68k_handle_int(IRQ_AMIGA_VERTB); 91 generic_handle_irq(IRQ_AMIGA_VERTB);
164 } 92 }
165 return IRQ_HANDLED;
166} 93}
167 94
168static irqreturn_t ami_int4(int irq, void *dev_id) 95static void ami_int4(unsigned int irq, struct irq_desc *desc)
169{ 96{
170 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; 97 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
171 98
172 /* if audio 0 interrupt */ 99 /* if audio 0 interrupt */
173 if (ints & IF_AUD0) { 100 if (ints & IF_AUD0) {
174 amiga_custom.intreq = IF_AUD0; 101 amiga_custom.intreq = IF_AUD0;
175 m68k_handle_int(IRQ_AMIGA_AUD0); 102 generic_handle_irq(IRQ_AMIGA_AUD0);
176 } 103 }
177 104
178 /* if audio 1 interrupt */ 105 /* if audio 1 interrupt */
179 if (ints & IF_AUD1) { 106 if (ints & IF_AUD1) {
180 amiga_custom.intreq = IF_AUD1; 107 amiga_custom.intreq = IF_AUD1;
181 m68k_handle_int(IRQ_AMIGA_AUD1); 108 generic_handle_irq(IRQ_AMIGA_AUD1);
182 } 109 }
183 110
184 /* if audio 2 interrupt */ 111 /* if audio 2 interrupt */
185 if (ints & IF_AUD2) { 112 if (ints & IF_AUD2) {
186 amiga_custom.intreq = IF_AUD2; 113 amiga_custom.intreq = IF_AUD2;
187 m68k_handle_int(IRQ_AMIGA_AUD2); 114 generic_handle_irq(IRQ_AMIGA_AUD2);
188 } 115 }
189 116
190 /* if audio 3 interrupt */ 117 /* if audio 3 interrupt */
191 if (ints & IF_AUD3) { 118 if (ints & IF_AUD3) {
192 amiga_custom.intreq = IF_AUD3; 119 amiga_custom.intreq = IF_AUD3;
193 m68k_handle_int(IRQ_AMIGA_AUD3); 120 generic_handle_irq(IRQ_AMIGA_AUD3);
194 } 121 }
195 return IRQ_HANDLED;
196} 122}
197 123
198static irqreturn_t ami_int5(int irq, void *dev_id) 124static void ami_int5(unsigned int irq, struct irq_desc *desc)
199{ 125{
200 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; 126 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
201 127
202 /* if serial receive buffer full interrupt */ 128 /* if serial receive buffer full interrupt */
203 if (ints & IF_RBF) { 129 if (ints & IF_RBF) {
204 /* acknowledge of IF_RBF must be done by the serial interrupt */ 130 /* acknowledge of IF_RBF must be done by the serial interrupt */
205 m68k_handle_int(IRQ_AMIGA_RBF); 131 generic_handle_irq(IRQ_AMIGA_RBF);
206 } 132 }
207 133
208 /* if a disk sync interrupt */ 134 /* if a disk sync interrupt */
209 if (ints & IF_DSKSYN) { 135 if (ints & IF_DSKSYN) {
210 amiga_custom.intreq = IF_DSKSYN; 136 amiga_custom.intreq = IF_DSKSYN;
211 m68k_handle_int(IRQ_AMIGA_DSKSYN); 137 generic_handle_irq(IRQ_AMIGA_DSKSYN);
212 } 138 }
213 return IRQ_HANDLED; 139}
140
141
142/*
143 * void amiga_init_IRQ(void)
144 *
145 * Parameters: None
146 *
147 * Returns: Nothing
148 *
149 * This function should be called during kernel startup to initialize
150 * the amiga IRQ handling routines.
151 */
152
153void __init amiga_init_IRQ(void)
154{
155 m68k_setup_irq_controller(&amiga_irq_chip, handle_simple_irq, IRQ_USER,
156 AMI_STD_IRQS);
157
158 irq_set_chained_handler(IRQ_AUTO_1, ami_int1);
159 irq_set_chained_handler(IRQ_AUTO_3, ami_int3);
160 irq_set_chained_handler(IRQ_AUTO_4, ami_int4);
161 irq_set_chained_handler(IRQ_AUTO_5, ami_int5);
162
163 /* turn off PCMCIA interrupts */
164 if (AMIGAHW_PRESENT(PCMCIA))
165 gayle.inten = GAYLE_IRQ_IDE;
166
167 /* turn off all interrupts and enable the master interrupt bit */
168 amiga_custom.intena = 0x7fff;
169 amiga_custom.intreq = 0x7fff;
170 amiga_custom.intena = IF_SETCLR | IF_INTEN;
171
172 cia_init_IRQ(&ciaa_base);
173 cia_init_IRQ(&ciab_base);
214} 174}
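The amiints.c rewrite above is the core of the m68k interrupt rework: the private irq_controller structure is replaced by the kernel's generic struct irq_chip (whose callbacks take struct irq_data and read data->irq), the per-level autovector handlers become chained flow handlers installed with irq_set_chained_handler(), and each decoded source is dispatched through generic_handle_irq() instead of m68k_handle_int(). A stripped-down sketch of that pattern follows; MYCHIP_IRQ_BASE, MYCHIP_NR_IRQS, MYCHIP_PARENT_IRQ and the my_chip_*()/my_int_*() helpers are placeholders, not symbols from the patch.

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/irq.h>

/* Placeholder hardware accessors: mask/unmask one source, read/ack the
 * pending bitmap of the cascaded controller.
 */
static void my_irq_enable(struct irq_data *data)
{
	my_chip_unmask(data->irq - MYCHIP_IRQ_BASE);
}

static void my_irq_disable(struct irq_data *data)
{
	my_chip_mask(data->irq - MYCHIP_IRQ_BASE);
}

static struct irq_chip my_irq_chip = {
	.name        = "mychip",
	.irq_enable  = my_irq_enable,
	.irq_disable = my_irq_disable,
};

/* Chained handler: decode the pending sources and hand each one to the
 * generic IRQ core instead of a private dispatcher.
 */
static void my_demux(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending = my_int_pending();
	int bit;

	for_each_set_bit(bit, &pending, MYCHIP_NR_IRQS) {
		my_int_ack(bit);
		generic_handle_irq(MYCHIP_IRQ_BASE + bit);
	}
}

static void __init my_init_IRQ(void)
{
	int i;

	for (i = 0; i < MYCHIP_NR_IRQS; i++)
		irq_set_chip_and_handler(MYCHIP_IRQ_BASE + i,
					 &my_irq_chip, handle_simple_irq);
	irq_set_chained_handler(MYCHIP_PARENT_IRQ, my_demux);
}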
diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
index ecd0f7ca6f0e..18c0e29976e3 100644
--- a/arch/m68k/amiga/cia.c
+++ b/arch/m68k/amiga/cia.c
@@ -93,13 +93,14 @@ static irqreturn_t cia_handler(int irq, void *dev_id)
93 amiga_custom.intreq = base->int_mask; 93 amiga_custom.intreq = base->int_mask;
94 for (; ints; mach_irq++, ints >>= 1) { 94 for (; ints; mach_irq++, ints >>= 1) {
95 if (ints & 1) 95 if (ints & 1)
96 m68k_handle_int(mach_irq); 96 generic_handle_irq(mach_irq);
97 } 97 }
98 return IRQ_HANDLED; 98 return IRQ_HANDLED;
99} 99}
100 100
101static void cia_enable_irq(unsigned int irq) 101static void cia_irq_enable(struct irq_data *data)
102{ 102{
103 unsigned int irq = data->irq;
103 unsigned char mask; 104 unsigned char mask;
104 105
105 if (irq >= IRQ_AMIGA_CIAB) { 106 if (irq >= IRQ_AMIGA_CIAB) {
@@ -113,19 +114,20 @@ static void cia_enable_irq(unsigned int irq)
113 } 114 }
114} 115}
115 116
116static void cia_disable_irq(unsigned int irq) 117static void cia_irq_disable(struct irq_data *data)
117{ 118{
119 unsigned int irq = data->irq;
120
118 if (irq >= IRQ_AMIGA_CIAB) 121 if (irq >= IRQ_AMIGA_CIAB)
119 cia_able_irq(&ciab_base, 1 << (irq - IRQ_AMIGA_CIAB)); 122 cia_able_irq(&ciab_base, 1 << (irq - IRQ_AMIGA_CIAB));
120 else 123 else
121 cia_able_irq(&ciaa_base, 1 << (irq - IRQ_AMIGA_CIAA)); 124 cia_able_irq(&ciaa_base, 1 << (irq - IRQ_AMIGA_CIAA));
122} 125}
123 126
124static struct irq_controller cia_irq_controller = { 127static struct irq_chip cia_irq_chip = {
125 .name = "cia", 128 .name = "cia",
126 .lock = __SPIN_LOCK_UNLOCKED(cia_irq_controller.lock), 129 .irq_enable = cia_irq_enable,
127 .enable = cia_enable_irq, 130 .irq_disable = cia_irq_disable,
128 .disable = cia_disable_irq,
129}; 131};
130 132
131/* 133/*
@@ -134,9 +136,9 @@ static struct irq_controller cia_irq_controller = {
134 * into this chain. 136 * into this chain.
135 */ 137 */
136 138
137static void auto_enable_irq(unsigned int irq) 139static void auto_irq_enable(struct irq_data *data)
138{ 140{
139 switch (irq) { 141 switch (data->irq) {
140 case IRQ_AUTO_2: 142 case IRQ_AUTO_2:
141 amiga_custom.intena = IF_SETCLR | IF_PORTS; 143 amiga_custom.intena = IF_SETCLR | IF_PORTS;
142 break; 144 break;
@@ -146,9 +148,9 @@ static void auto_enable_irq(unsigned int irq)
146 } 148 }
147} 149}
148 150
149static void auto_disable_irq(unsigned int irq) 151static void auto_irq_disable(struct irq_data *data)
150{ 152{
151 switch (irq) { 153 switch (data->irq) {
152 case IRQ_AUTO_2: 154 case IRQ_AUTO_2:
153 amiga_custom.intena = IF_PORTS; 155 amiga_custom.intena = IF_PORTS;
154 break; 156 break;
@@ -158,24 +160,25 @@ static void auto_disable_irq(unsigned int irq)
158 } 160 }
159} 161}
160 162
161static struct irq_controller auto_irq_controller = { 163static struct irq_chip auto_irq_chip = {
162 .name = "auto", 164 .name = "auto",
163 .lock = __SPIN_LOCK_UNLOCKED(auto_irq_controller.lock), 165 .irq_enable = auto_irq_enable,
164 .enable = auto_enable_irq, 166 .irq_disable = auto_irq_disable,
165 .disable = auto_disable_irq,
166}; 167};
167 168
168void __init cia_init_IRQ(struct ciabase *base) 169void __init cia_init_IRQ(struct ciabase *base)
169{ 170{
170 m68k_setup_irq_controller(&cia_irq_controller, base->cia_irq, CIA_IRQS); 171 m68k_setup_irq_controller(&cia_irq_chip, handle_simple_irq,
172 base->cia_irq, CIA_IRQS);
171 173
172 /* clear any pending interrupt and turn off all interrupts */ 174 /* clear any pending interrupt and turn off all interrupts */
173 cia_set_irq(base, CIA_ICR_ALL); 175 cia_set_irq(base, CIA_ICR_ALL);
174 cia_able_irq(base, CIA_ICR_ALL); 176 cia_able_irq(base, CIA_ICR_ALL);
175 177
176 /* override auto int and install CIA handler */ 178 /* override auto int and install CIA handler */
177 m68k_setup_irq_controller(&auto_irq_controller, base->handler_irq, 1); 179 m68k_setup_irq_controller(&auto_irq_chip, handle_simple_irq,
178 m68k_irq_startup(base->handler_irq); 180 base->handler_irq, 1);
181 m68k_irq_startup_irq(base->handler_irq);
179 if (request_irq(base->handler_irq, cia_handler, IRQF_SHARED, 182 if (request_irq(base->handler_irq, cia_handler, IRQF_SHARED,
180 base->name, base)) 183 base->name, base))
181 pr_err("Couldn't register %s interrupt\n", base->name); 184 pr_err("Couldn't register %s interrupt\n", base->name);
diff --git a/arch/m68k/apollo/dn_ints.c b/arch/m68k/apollo/dn_ints.c
index 5d47f3aa3810..17be1e7e2df2 100644
--- a/arch/m68k/apollo/dn_ints.c
+++ b/arch/m68k/apollo/dn_ints.c
@@ -1,19 +1,13 @@
1#include <linux/interrupt.h> 1#include <linux/interrupt.h>
2#include <linux/irq.h>
2 3
3#include <asm/irq.h>
4#include <asm/traps.h> 4#include <asm/traps.h>
5#include <asm/apollohw.h> 5#include <asm/apollohw.h>
6 6
7void dn_process_int(unsigned int irq, struct pt_regs *fp) 7unsigned int apollo_irq_startup(struct irq_data *data)
8{ 8{
9 __m68k_handle_int(irq, fp); 9 unsigned int irq = data->irq;
10 10
11 *(volatile unsigned char *)(pica)=0x20;
12 *(volatile unsigned char *)(picb)=0x20;
13}
14
15int apollo_irq_startup(unsigned int irq)
16{
17 if (irq < 8) 11 if (irq < 8)
18 *(volatile unsigned char *)(pica+1) &= ~(1 << irq); 12 *(volatile unsigned char *)(pica+1) &= ~(1 << irq);
19 else 13 else
@@ -21,24 +15,33 @@ int apollo_irq_startup(unsigned int irq)
21 return 0; 15 return 0;
22} 16}
23 17
24void apollo_irq_shutdown(unsigned int irq) 18void apollo_irq_shutdown(struct irq_data *data)
25{ 19{
20 unsigned int irq = data->irq;
21
26 if (irq < 8) 22 if (irq < 8)
27 *(volatile unsigned char *)(pica+1) |= (1 << irq); 23 *(volatile unsigned char *)(pica+1) |= (1 << irq);
28 else 24 else
29 *(volatile unsigned char *)(picb+1) |= (1 << (irq - 8)); 25 *(volatile unsigned char *)(picb+1) |= (1 << (irq - 8));
30} 26}
31 27
32static struct irq_controller apollo_irq_controller = { 28void apollo_irq_eoi(struct irq_data *data)
29{
30 *(volatile unsigned char *)(pica) = 0x20;
31 *(volatile unsigned char *)(picb) = 0x20;
32}
33
34static struct irq_chip apollo_irq_chip = {
33 .name = "apollo", 35 .name = "apollo",
34 .lock = __SPIN_LOCK_UNLOCKED(apollo_irq_controller.lock), 36 .irq_startup = apollo_irq_startup,
35 .startup = apollo_irq_startup, 37 .irq_shutdown = apollo_irq_shutdown,
36 .shutdown = apollo_irq_shutdown, 38 .irq_eoi = apollo_irq_eoi,
37}; 39};
38 40
39 41
40void __init dn_init_IRQ(void) 42void __init dn_init_IRQ(void)
41{ 43{
42 m68k_setup_user_interrupt(VEC_USER + 96, 16, dn_process_int); 44 m68k_setup_user_interrupt(VEC_USER + 96, 16);
43 m68k_setup_irq_controller(&apollo_irq_controller, IRQ_APOLLO, 16); 45 m68k_setup_irq_controller(&apollo_irq_chip, handle_fasteoi_irq,
46 IRQ_APOLLO, 16);
44} 47}
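The Apollo conversion differs slightly from the Amiga one: the end-of-interrupt writes to the two PICs (the 0x20 pokes) move out of the old dispatch wrapper into an irq_eoi callback, and the chip is paired with handle_fasteoi_irq so the generic core issues the EOI after the handler has run. A minimal sketch of that shape, with MY_PIC_EOI(), MY_IRQ_BASE and MY_NR_IRQS as placeholders:

#include <linux/init.h>
#include <linux/irq.h>
#include <asm/irq.h>	/* m68k_setup_irq_controller() */

/* MY_PIC_EOI() stands in for the real controller acknowledge. */
static void my_irq_eoi(struct irq_data *data)
{
	MY_PIC_EOI(data->irq);
}

static struct irq_chip my_eoi_chip = {
	.name    = "my-pic",
	.irq_eoi = my_irq_eoi,		/* invoked by handle_fasteoi_irq */
};

static void __init my_pic_init(void)
{
	/* the fasteoi flow requires a working .irq_eoi callback */
	m68k_setup_irq_controller(&my_eoi_chip, handle_fasteoi_irq,
				  MY_IRQ_BASE, MY_NR_IRQS);
}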
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 26a804e67bce..6d196dadfdbc 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -60,243 +60,7 @@
60 * <asm/atariints.h>): Autovector interrupts are 1..7, then follow ST-MFP, 60 * <asm/atariints.h>): Autovector interrupts are 1..7, then follow ST-MFP,
61 * TT-MFP, SCC, and finally VME interrupts. Vector numbers for the latter can 61 * TT-MFP, SCC, and finally VME interrupts. Vector numbers for the latter can
62 * be allocated by atari_register_vme_int(). 62 * be allocated by atari_register_vme_int().
63 *
64 * Each interrupt can be of three types:
65 *
66 * - SLOW: The handler runs with all interrupts enabled, except the one it
67 * was called by (to avoid reentering). This should be the usual method.
68 * But it is currently possible only for MFP ints, since only the MFP
69 * offers an easy way to mask interrupts.
70 *
71 * - FAST: The handler runs with all interrupts disabled. This should be used
72 * only for really fast handlers, that just do actions immediately
73 * necessary, and let the rest do a bottom half or task queue.
74 *
75 * - PRIORITIZED: The handler can be interrupted by higher-level ints
76 * (greater IPL, no MFP priorities!). This is the method of choice for ints
77 * which should be slow, but are not from a MFP.
78 *
79 * The feature of more than one handler for one int source is still there, but
80 * only applicable if all handers are of the same type. To not slow down
81 * processing of ints with only one handler by the chaining feature, the list
82 * calling function atari_call_irq_list() is only plugged in at the time the
83 * second handler is registered.
84 *
85 * Implementation notes: For fast-as-possible int handling, there are separate
86 * entry points for each type (slow/fast/prio). The assembler handler calls
87 * the irq directly in the usual case, no C wrapper is involved. In case of
88 * multiple handlers, atari_call_irq_list() is registered as handler and calls
89 * in turn the real irq's. To ease access from assembler level to the irq
90 * function pointer and accompanying data, these two are stored in a separate
91 * array, irq_handler[]. The rest of data (type, name) are put into a second
92 * array, irq_param, that is accessed from C only. For each slow interrupt (32
93 * in all) there are separate handler functions, which makes it possible to
94 * hard-code the MFP register address and value, are necessary to mask the
95 * int. If there'd be only one generic function, lots of calculations would be
96 * needed to determine MFP register and int mask from the vector number :-(
97 *
98 * Furthermore, slow ints may not lower the IPL below its previous value
99 * (before the int happened). This is needed so that an int of class PRIO, on
100 * that this int may be stacked, cannot be reentered. This feature is
101 * implemented as follows: If the stack frame format is 1 (throwaway), the int
102 * is not stacked, and the IPL is anded with 0xfbff, resulting in a new level
103 * 2, which still blocks the HSYNC, but no interrupts of interest. If the
104 * frame format is 0, the int is nested, and the old IPL value can be found in
105 * the sr copy in the frame.
106 */
107
108#if 0
109
110#define NUM_INT_SOURCES (8 + NUM_ATARI_SOURCES)
111
112typedef void (*asm_irq_handler)(void);
113
114struct irqhandler {
115 irqreturn_t (*handler)(int, void *, struct pt_regs *);
116 void *dev_id;
117};
118
119struct irqparam {
120 unsigned long flags;
121 const char *devname;
122};
123
124/*
125 * Array with irq's and their parameter data. This array is accessed from low
126 * level assembler code, so an element size of 8 allows usage of index scaling
127 * addressing mode.
128 */ 63 */
129static struct irqhandler irq_handler[NUM_INT_SOURCES];
130
131/*
132 * This array hold the rest of parameters of int handlers: type
133 * (slow,fast,prio) and the name of the handler. These values are only
134 * accessed from C
135 */
136static struct irqparam irq_param[NUM_INT_SOURCES];
137
138/* check for valid int number (complex, sigh...) */
139#define IS_VALID_INTNO(n) \
140 ((n) > 0 && \
141 /* autovec and ST-MFP ok anyway */ \
142 (((n) < TTMFP_SOURCE_BASE) || \
143 /* TT-MFP ok if present */ \
144 ((n) >= TTMFP_SOURCE_BASE && (n) < SCC_SOURCE_BASE && \
145 ATARIHW_PRESENT(TT_MFP)) || \
146 /* SCC ok if present and number even */ \
147 ((n) >= SCC_SOURCE_BASE && (n) < VME_SOURCE_BASE && \
148 !((n) & 1) && ATARIHW_PRESENT(SCC)) || \
149 /* greater numbers ok if they are registered VME vectors */ \
150 ((n) >= VME_SOURCE_BASE && (n) < VME_SOURCE_BASE + VME_MAX_SOURCES && \
151 free_vme_vec_bitmap & (1 << ((n) - VME_SOURCE_BASE)))))
152
153
154/*
155 * Here start the assembler entry points for interrupts
156 */
157
158#define IRQ_NAME(nr) atari_slow_irq_##nr##_handler(void)
159
160#define BUILD_SLOW_IRQ(n) \
161asmlinkage void IRQ_NAME(n); \
162/* Dummy function to allow asm with operands. */ \
163void atari_slow_irq_##n##_dummy (void) { \
164__asm__ (__ALIGN_STR "\n" \
165"atari_slow_irq_" #n "_handler:\t" \
166" addl %6,%5\n" /* preempt_count() += HARDIRQ_OFFSET */ \
167 SAVE_ALL_INT "\n" \
168 GET_CURRENT(%%d0) "\n" \
169" andb #~(1<<(%c3&7)),%a4:w\n" /* mask this interrupt */ \
170 /* get old IPL from stack frame */ \
171" bfextu %%sp@(%c2){#5,#3},%%d0\n" \
172" movew %%sr,%%d1\n" \
173" bfins %%d0,%%d1{#21,#3}\n" \
174" movew %%d1,%%sr\n" /* set IPL = previous value */ \
175" addql #1,%a0\n" \
176" lea %a1,%%a0\n" \
177" pea %%sp@\n" /* push addr of frame */ \
178" movel %%a0@(4),%%sp@-\n" /* push handler data */ \
179" pea (%c3+8)\n" /* push int number */ \
180" movel %%a0@,%%a0\n" \
181" jbsr %%a0@\n" /* call the handler */ \
182" addql #8,%%sp\n" \
183" addql #4,%%sp\n" \
184" orw #0x0600,%%sr\n" \
185" andw #0xfeff,%%sr\n" /* set IPL = 6 again */ \
186" orb #(1<<(%c3&7)),%a4:w\n" /* now unmask the int again */ \
187" jbra ret_from_interrupt\n" \
188 : : "i" (&kstat_cpu(0).irqs[n+8]), "i" (&irq_handler[n+8]), \
189 "n" (PT_OFF_SR), "n" (n), \
190 "i" (n & 8 ? (n & 16 ? &tt_mfp.int_mk_a : &st_mfp.int_mk_a) \
191 : (n & 16 ? &tt_mfp.int_mk_b : &st_mfp.int_mk_b)), \
192 "m" (preempt_count()), "di" (HARDIRQ_OFFSET) \
193); \
194 for (;;); /* fake noreturn */ \
195}
196
197BUILD_SLOW_IRQ(0);
198BUILD_SLOW_IRQ(1);
199BUILD_SLOW_IRQ(2);
200BUILD_SLOW_IRQ(3);
201BUILD_SLOW_IRQ(4);
202BUILD_SLOW_IRQ(5);
203BUILD_SLOW_IRQ(6);
204BUILD_SLOW_IRQ(7);
205BUILD_SLOW_IRQ(8);
206BUILD_SLOW_IRQ(9);
207BUILD_SLOW_IRQ(10);
208BUILD_SLOW_IRQ(11);
209BUILD_SLOW_IRQ(12);
210BUILD_SLOW_IRQ(13);
211BUILD_SLOW_IRQ(14);
212BUILD_SLOW_IRQ(15);
213BUILD_SLOW_IRQ(16);
214BUILD_SLOW_IRQ(17);
215BUILD_SLOW_IRQ(18);
216BUILD_SLOW_IRQ(19);
217BUILD_SLOW_IRQ(20);
218BUILD_SLOW_IRQ(21);
219BUILD_SLOW_IRQ(22);
220BUILD_SLOW_IRQ(23);
221BUILD_SLOW_IRQ(24);
222BUILD_SLOW_IRQ(25);
223BUILD_SLOW_IRQ(26);
224BUILD_SLOW_IRQ(27);
225BUILD_SLOW_IRQ(28);
226BUILD_SLOW_IRQ(29);
227BUILD_SLOW_IRQ(30);
228BUILD_SLOW_IRQ(31);
229
230asm_irq_handler slow_handlers[32] = {
231 [0] = atari_slow_irq_0_handler,
232 [1] = atari_slow_irq_1_handler,
233 [2] = atari_slow_irq_2_handler,
234 [3] = atari_slow_irq_3_handler,
235 [4] = atari_slow_irq_4_handler,
236 [5] = atari_slow_irq_5_handler,
237 [6] = atari_slow_irq_6_handler,
238 [7] = atari_slow_irq_7_handler,
239 [8] = atari_slow_irq_8_handler,
240 [9] = atari_slow_irq_9_handler,
241 [10] = atari_slow_irq_10_handler,
242 [11] = atari_slow_irq_11_handler,
243 [12] = atari_slow_irq_12_handler,
244 [13] = atari_slow_irq_13_handler,
245 [14] = atari_slow_irq_14_handler,
246 [15] = atari_slow_irq_15_handler,
247 [16] = atari_slow_irq_16_handler,
248 [17] = atari_slow_irq_17_handler,
249 [18] = atari_slow_irq_18_handler,
250 [19] = atari_slow_irq_19_handler,
251 [20] = atari_slow_irq_20_handler,
252 [21] = atari_slow_irq_21_handler,
253 [22] = atari_slow_irq_22_handler,
254 [23] = atari_slow_irq_23_handler,
255 [24] = atari_slow_irq_24_handler,
256 [25] = atari_slow_irq_25_handler,
257 [26] = atari_slow_irq_26_handler,
258 [27] = atari_slow_irq_27_handler,
259 [28] = atari_slow_irq_28_handler,
260 [29] = atari_slow_irq_29_handler,
261 [30] = atari_slow_irq_30_handler,
262 [31] = atari_slow_irq_31_handler
263};
264
265asmlinkage void atari_fast_irq_handler( void );
266asmlinkage void atari_prio_irq_handler( void );
267
268/* Dummy function to allow asm with operands. */
269void atari_fast_prio_irq_dummy (void) {
270__asm__ (__ALIGN_STR "\n"
271"atari_fast_irq_handler:\n\t"
272 "orw #0x700,%%sr\n" /* disable all interrupts */
273"atari_prio_irq_handler:\n\t"
274 "addl %3,%2\n\t" /* preempt_count() += HARDIRQ_OFFSET */
275 SAVE_ALL_INT "\n\t"
276 GET_CURRENT(%%d0) "\n\t"
277 /* get vector number from stack frame and convert to source */
278 "bfextu %%sp@(%c1){#4,#10},%%d0\n\t"
279 "subw #(0x40-8),%%d0\n\t"
280 "jpl 1f\n\t"
281 "addw #(0x40-8-0x18),%%d0\n"
282 "1:\tlea %a0,%%a0\n\t"
283 "addql #1,%%a0@(%%d0:l:4)\n\t"
284 "lea irq_handler,%%a0\n\t"
285 "lea %%a0@(%%d0:l:8),%%a0\n\t"
286 "pea %%sp@\n\t" /* push frame address */
287 "movel %%a0@(4),%%sp@-\n\t" /* push handler data */
288 "movel %%d0,%%sp@-\n\t" /* push int number */
289 "movel %%a0@,%%a0\n\t"
290 "jsr %%a0@\n\t" /* and call the handler */
291 "addql #8,%%sp\n\t"
292 "addql #4,%%sp\n\t"
293 "jbra ret_from_interrupt"
294 : : "i" (&kstat_cpu(0).irqs), "n" (PT_OFF_FORMATVEC),
295 "m" (preempt_count()), "di" (HARDIRQ_OFFSET)
296);
297 for (;;);
298}
299#endif
300 64
301/* 65/*
302 * Bitmap for free interrupt vector numbers 66 * Bitmap for free interrupt vector numbers
@@ -320,31 +84,44 @@ extern void atari_microwire_cmd(int cmd);
320 84
321extern int atari_SCC_reset_done; 85extern int atari_SCC_reset_done;
322 86
323static int atari_startup_irq(unsigned int irq) 87static unsigned int atari_irq_startup(struct irq_data *data)
324{ 88{
325 m68k_irq_startup(irq); 89 unsigned int irq = data->irq;
90
91 m68k_irq_startup(data);
326 atari_turnon_irq(irq); 92 atari_turnon_irq(irq);
327 atari_enable_irq(irq); 93 atari_enable_irq(irq);
328 return 0; 94 return 0;
329} 95}
330 96
331static void atari_shutdown_irq(unsigned int irq) 97static void atari_irq_shutdown(struct irq_data *data)
332{ 98{
99 unsigned int irq = data->irq;
100
333 atari_disable_irq(irq); 101 atari_disable_irq(irq);
334 atari_turnoff_irq(irq); 102 atari_turnoff_irq(irq);
335 m68k_irq_shutdown(irq); 103 m68k_irq_shutdown(data);
336 104
337 if (irq == IRQ_AUTO_4) 105 if (irq == IRQ_AUTO_4)
338 vectors[VEC_INT4] = falcon_hblhandler; 106 vectors[VEC_INT4] = falcon_hblhandler;
339} 107}
340 108
341static struct irq_controller atari_irq_controller = { 109static void atari_irq_enable(struct irq_data *data)
110{
111 atari_enable_irq(data->irq);
112}
113
114static void atari_irq_disable(struct irq_data *data)
115{
116 atari_disable_irq(data->irq);
117}
118
119static struct irq_chip atari_irq_chip = {
342 .name = "atari", 120 .name = "atari",
343 .lock = __SPIN_LOCK_UNLOCKED(atari_irq_controller.lock), 121 .irq_startup = atari_irq_startup,
344 .startup = atari_startup_irq, 122 .irq_shutdown = atari_irq_shutdown,
345 .shutdown = atari_shutdown_irq, 123 .irq_enable = atari_irq_enable,
346 .enable = atari_enable_irq, 124 .irq_disable = atari_irq_disable,
347 .disable = atari_disable_irq,
348}; 125};
349 126
350/* 127/*
@@ -360,8 +137,9 @@ static struct irq_controller atari_irq_controller = {
360 137
361void __init atari_init_IRQ(void) 138void __init atari_init_IRQ(void)
362{ 139{
363 m68k_setup_user_interrupt(VEC_USER, NUM_ATARI_SOURCES - IRQ_USER, NULL); 140 m68k_setup_user_interrupt(VEC_USER, NUM_ATARI_SOURCES - IRQ_USER);
364 m68k_setup_irq_controller(&atari_irq_controller, 1, NUM_ATARI_SOURCES - 1); 141 m68k_setup_irq_controller(&atari_irq_chip, handle_simple_irq, 1,
142 NUM_ATARI_SOURCES - 1);
365 143
366 /* Initialize the MFP(s) */ 144 /* Initialize the MFP(s) */
367 145
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 1edd95095cb4..81286476f740 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -86,7 +86,7 @@ static void bvme6000_get_model(char *model)
86 */ 86 */
87static void __init bvme6000_init_IRQ(void) 87static void __init bvme6000_init_IRQ(void)
88{ 88{
89 m68k_setup_user_interrupt(VEC_USER, 192, NULL); 89 m68k_setup_user_interrupt(VEC_USER, 192);
90} 90}
91 91
92void __init config_bvme6000(void) 92void __init config_bvme6000(void)
diff --git a/arch/m68k/hp300/time.c b/arch/m68k/hp300/time.c
index f6312c7d8727..c87fe69b0728 100644
--- a/arch/m68k/hp300/time.c
+++ b/arch/m68k/hp300/time.c
@@ -70,7 +70,7 @@ void __init hp300_sched_init(irq_handler_t vector)
70 70
71 asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE)); 71 asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
72 72
73 if (request_irq(IRQ_AUTO_6, hp300_tick, IRQ_FLG_STD, "timer tick", vector)) 73 if (request_irq(IRQ_AUTO_6, hp300_tick, 0, "timer tick", vector))
74 pr_err("Couldn't register timer interrupt\n"); 74 pr_err("Couldn't register timer interrupt\n");
75 75
76 out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */ 76 out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
diff --git a/arch/m68k/include/asm/hardirq.h b/arch/m68k/include/asm/hardirq.h
index 870e5347155b..db30ed276878 100644
--- a/arch/m68k/include/asm/hardirq.h
+++ b/arch/m68k/include/asm/hardirq.h
@@ -18,6 +18,11 @@
18 18
19#ifdef CONFIG_MMU 19#ifdef CONFIG_MMU
20 20
21static inline void ack_bad_irq(unsigned int irq)
22{
23 pr_crit("unexpected IRQ trap at vector %02x\n", irq);
24}
25
21/* entry.S is sensitive to the offsets of these fields */ 26/* entry.S is sensitive to the offsets of these fields */
22typedef struct { 27typedef struct {
23 unsigned int __softirq_pending; 28 unsigned int __softirq_pending;
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index 69ed0d74d532..6198df5ff245 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -27,11 +27,6 @@
27 27
28#ifdef CONFIG_MMU 28#ifdef CONFIG_MMU
29 29
30#include <linux/linkage.h>
31#include <linux/hardirq.h>
32#include <linux/irqreturn.h>
33#include <linux/spinlock_types.h>
34
35/* 30/*
36 * Interrupt source definitions 31 * Interrupt source definitions
37 * General interrupt sources are the level 1-7. 32 * General interrupt sources are the level 1-7.
@@ -54,10 +49,6 @@
54 49
55#define IRQ_USER 8 50#define IRQ_USER 8
56 51
57extern unsigned int irq_canonicalize(unsigned int irq);
58
59struct pt_regs;
60
61/* 52/*
62 * various flags for request_irq() - the Amiga now uses the standard 53 * various flags for request_irq() - the Amiga now uses the standard
63 * mechanism like all other architectures - IRQF_DISABLED and 54 * mechanism like all other architectures - IRQF_DISABLED and
@@ -71,57 +62,27 @@ struct pt_regs;
71#define IRQ_FLG_STD (0x8000) /* internally used */ 62#define IRQ_FLG_STD (0x8000) /* internally used */
72#endif 63#endif
73 64
74/* 65struct irq_data;
75 * This structure is used to chain together the ISRs for a particular 66struct irq_chip;
76 * interrupt source (if it supports chaining). 67struct irq_desc;
77 */ 68extern unsigned int m68k_irq_startup(struct irq_data *data);
78typedef struct irq_node { 69extern unsigned int m68k_irq_startup_irq(unsigned int irq);
79 irqreturn_t (*handler)(int, void *); 70extern void m68k_irq_shutdown(struct irq_data *data);
80 void *dev_id; 71extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int,
81 struct irq_node *next; 72 struct pt_regs *));
82 unsigned long flags; 73extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt);
83 const char *devname; 74extern void m68k_setup_irq_controller(struct irq_chip *,
84} irq_node_t; 75 void (*handle)(unsigned int irq,
85 76 struct irq_desc *desc),
86/* 77 unsigned int irq, unsigned int cnt);
87 * This structure has only 4 elements for speed reasons
88 */
89struct irq_handler {
90 int (*handler)(int, void *);
91 unsigned long flags;
92 void *dev_id;
93 const char *devname;
94};
95
96struct irq_controller {
97 const char *name;
98 spinlock_t lock;
99 int (*startup)(unsigned int irq);
100 void (*shutdown)(unsigned int irq);
101 void (*enable)(unsigned int irq);
102 void (*disable)(unsigned int irq);
103};
104
105extern int m68k_irq_startup(unsigned int);
106extern void m68k_irq_shutdown(unsigned int);
107
108/*
109 * This function returns a new irq_node_t
110 */
111extern irq_node_t *new_irq_node(void);
112 78
113extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *)); 79extern unsigned int irq_canonicalize(unsigned int irq);
114extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt,
115 void (*handler)(unsigned int, struct pt_regs *));
116extern void m68k_setup_irq_controller(struct irq_controller *, unsigned int, unsigned int);
117
118asmlinkage void m68k_handle_int(unsigned int);
119asmlinkage void __m68k_handle_int(unsigned int, struct pt_regs *);
120 80
121#else 81#else
122#define irq_canonicalize(irq) (irq) 82#define irq_canonicalize(irq) (irq)
123#endif /* CONFIG_MMU */ 83#endif /* CONFIG_MMU */
124 84
125asmlinkage void do_IRQ(int irq, struct pt_regs *regs); 85asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
86extern atomic_t irq_err_count;
126 87
127#endif /* _M68K_IRQ_H_ */ 88#endif /* _M68K_IRQ_H_ */
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index c2a1c5eac1a6..12ebe43b008b 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -12,6 +12,8 @@ extern void mac_reset(void);
12extern void mac_poweroff(void); 12extern void mac_poweroff(void);
13extern void mac_init_IRQ(void); 13extern void mac_init_IRQ(void);
14extern int mac_irq_pending(unsigned int); 14extern int mac_irq_pending(unsigned int);
15extern void mac_irq_enable(struct irq_data *data);
16extern void mac_irq_disable(struct irq_data *data);
15 17
16/* 18/*
17 * Floppy driver magic hook - probably shouldn't be here 19 * Floppy driver magic hook - probably shouldn't be here
diff --git a/arch/m68k/include/asm/q40ints.h b/arch/m68k/include/asm/q40ints.h
index 3d970afb708f..22f12c9eb910 100644
--- a/arch/m68k/include/asm/q40ints.h
+++ b/arch/m68k/include/asm/q40ints.h
@@ -24,6 +24,3 @@
24#define Q40_IRQ10_MASK (1<<5) 24#define Q40_IRQ10_MASK (1<<5)
25#define Q40_IRQ14_MASK (1<<6) 25#define Q40_IRQ14_MASK (1<<6)
26#define Q40_IRQ15_MASK (1<<7) 26#define Q40_IRQ15_MASK (1<<7)
27
28extern unsigned long q40_probe_irq_on (void);
29extern int q40_probe_irq_off (unsigned long irqs);
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 43f984e93970..303192fc9260 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -350,10 +350,12 @@
350#define __NR_clock_adjtime 342 350#define __NR_clock_adjtime 342
351#define __NR_syncfs 343 351#define __NR_syncfs 343
352#define __NR_setns 344 352#define __NR_setns 344
353#define __NR_process_vm_readv 345
354#define __NR_process_vm_writev 346
353 355
354#ifdef __KERNEL__ 356#ifdef __KERNEL__
355 357
356#define NR_syscalls 345 358#define NR_syscalls 347
357 359
358#define __ARCH_WANT_IPC_PARSE_VERSION 360#define __ARCH_WANT_IPC_PARSE_VERSION
359#define __ARCH_WANT_OLD_READDIR 361#define __ARCH_WANT_OLD_READDIR
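The unistd.h hunk wires up the new cross-memory-attach system calls, process_vm_readv (345) and process_vm_writev (346) on m68k, and bumps NR_syscalls to match. For reference, a hedged userspace sketch of invoking the read variant directly via syscall(2), assuming installed kernel headers that define __NR_process_vm_readv; the target pid and remote address are placeholders:

#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	pid_t target = 1234;			/* placeholder pid */
	void *remote_addr = (void *)0x10000;	/* placeholder address */

	struct iovec local  = { .iov_base = buf,         .iov_len = sizeof(buf) };
	struct iovec remote = { .iov_base = remote_addr, .iov_len = sizeof(buf) };

	/* process_vm_readv(pid, local_iov, liovcnt, remote_iov, riovcnt, flags) */
	ssize_t n = syscall(__NR_process_vm_readv, target,
			    &local, 1UL, &remote, 1UL, 0UL);
	if (n < 0)
		perror("process_vm_readv");
	else
		printf("copied %zd bytes\n", n);
	return 0;
}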
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index e7f0f2e5ad44..c5696193281a 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -6,16 +6,15 @@ extra-$(CONFIG_MMU) := head.o
6extra-$(CONFIG_SUN3) := sun3-head.o 6extra-$(CONFIG_SUN3) := sun3-head.o
7extra-y += vmlinux.lds 7extra-y += vmlinux.lds
8 8
9obj-y := entry.o m68k_ksyms.o module.o process.o ptrace.o setup.o signal.o \ 9obj-y := entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o setup.o \
10 sys_m68k.o syscalltable.o time.o traps.o 10 signal.o sys_m68k.o syscalltable.o time.o traps.o
11 11
12obj-$(CONFIG_MMU) += ints.o devres.o vectors.o 12obj-$(CONFIG_MMU) += ints.o vectors.o
13devres-$(CONFIG_MMU) = ../../../kernel/irq/devres.o
14 13
15ifndef CONFIG_MMU_SUN3 14ifndef CONFIG_MMU_SUN3
16obj-y += dma.o 15obj-y += dma.o
17endif 16endif
18ifndef CONFIG_MMU 17ifndef CONFIG_MMU
19obj-y += init_task.o irq.o 18obj-y += init_task.o
20endif 19endif
21 20
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S
index bd0ec05263b2..c713f514843d 100644
--- a/arch/m68k/kernel/entry_mm.S
+++ b/arch/m68k/kernel/entry_mm.S
@@ -48,7 +48,7 @@
48.globl sys_fork, sys_clone, sys_vfork 48.globl sys_fork, sys_clone, sys_vfork
49.globl ret_from_interrupt, bad_interrupt 49.globl ret_from_interrupt, bad_interrupt
50.globl auto_irqhandler_fixup 50.globl auto_irqhandler_fixup
51.globl user_irqvec_fixup, user_irqhandler_fixup 51.globl user_irqvec_fixup
52 52
53.text 53.text
54ENTRY(buserr) 54ENTRY(buserr)
@@ -207,7 +207,7 @@ ENTRY(auto_inthandler)
207 movel %sp,%sp@- 207 movel %sp,%sp@-
208 movel %d0,%sp@- | put vector # on stack 208 movel %d0,%sp@- | put vector # on stack
209auto_irqhandler_fixup = . + 2 209auto_irqhandler_fixup = . + 2
210 jsr __m68k_handle_int | process the IRQ 210 jsr do_IRQ | process the IRQ
211 addql #8,%sp | pop parameters off stack 211 addql #8,%sp | pop parameters off stack
212 212
213ret_from_interrupt: 213ret_from_interrupt:
@@ -240,8 +240,7 @@ user_irqvec_fixup = . + 2
240 240
241 movel %sp,%sp@- 241 movel %sp,%sp@-
242 movel %d0,%sp@- | put vector # on stack 242 movel %d0,%sp@- | put vector # on stack
243user_irqhandler_fixup = . + 2 243 jsr do_IRQ | process the IRQ
244 jsr __m68k_handle_int | process the IRQ
245 addql #8,%sp | pop parameters off stack 244 addql #8,%sp | pop parameters off stack
246 245
247 subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) 246 subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
index 761ee0440c99..74fefac00899 100644
--- a/arch/m68k/kernel/ints.c
+++ b/arch/m68k/kernel/ints.c
@@ -4,25 +4,6 @@
4 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file COPYING in the main directory of this archive 5 * License. See the file COPYING in the main directory of this archive
6 * for more details. 6 * for more details.
7 *
8 * 07/03/96: Timer initialization, and thus mach_sched_init(),
9 * removed from request_irq() and moved to init_time().
10 * We should therefore consider renaming our add_isr() and
11 * remove_isr() to request_irq() and free_irq()
12 * respectively, so they are compliant with the other
13 * architectures. /Jes
14 * 11/07/96: Changed all add_/remove_isr() to request_/free_irq() calls.
15 * Removed irq list support, if any machine needs an irq server
16 * it must implement this itself (as it's already done), instead
17 * only default handler are used with mach_default_handler.
18 * request_irq got some flags different from other architectures:
19 * - IRQ_FLG_REPLACE : Replace an existing handler (the default one
20 * can be replaced without this flag)
21 * - IRQ_FLG_LOCK : handler can't be replaced
22 * There are other machine depending flags, see there
23 * If you want to replace a default handler you should know what
24 * you're doing, since it might handle different other irq sources
25 * which must be served /Roman Zippel
26 */ 7 */
27 8
28#include <linux/module.h> 9#include <linux/module.h>
@@ -47,33 +28,22 @@
47#endif 28#endif
48 29
49extern u32 auto_irqhandler_fixup[]; 30extern u32 auto_irqhandler_fixup[];
50extern u32 user_irqhandler_fixup[];
51extern u16 user_irqvec_fixup[]; 31extern u16 user_irqvec_fixup[];
52 32
53/* table for system interrupt handlers */
54static struct irq_node *irq_list[NR_IRQS];
55static struct irq_controller *irq_controller[NR_IRQS];
56static int irq_depth[NR_IRQS];
57
58static int m68k_first_user_vec; 33static int m68k_first_user_vec;
59 34
60static struct irq_controller auto_irq_controller = { 35static struct irq_chip auto_irq_chip = {
61 .name = "auto", 36 .name = "auto",
62 .lock = __SPIN_LOCK_UNLOCKED(auto_irq_controller.lock), 37 .irq_startup = m68k_irq_startup,
63 .startup = m68k_irq_startup, 38 .irq_shutdown = m68k_irq_shutdown,
64 .shutdown = m68k_irq_shutdown,
65}; 39};
66 40
67static struct irq_controller user_irq_controller = { 41static struct irq_chip user_irq_chip = {
68 .name = "user", 42 .name = "user",
69 .lock = __SPIN_LOCK_UNLOCKED(user_irq_controller.lock), 43 .irq_startup = m68k_irq_startup,
70 .startup = m68k_irq_startup, 44 .irq_shutdown = m68k_irq_shutdown,
71 .shutdown = m68k_irq_shutdown,
72}; 45};
73 46
74#define NUM_IRQ_NODES 100
75static irq_node_t nodes[NUM_IRQ_NODES];
76
77/* 47/*
78 * void init_IRQ(void) 48 * void init_IRQ(void)
79 * 49 *
@@ -96,7 +66,7 @@ void __init init_IRQ(void)
96 } 66 }
97 67
98 for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++) 68 for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++)
99 irq_controller[i] = &auto_irq_controller; 69 irq_set_chip_and_handler(i, &auto_irq_chip, handle_simple_irq);
100 70
101 mach_init_IRQ(); 71 mach_init_IRQ();
102} 72}
@@ -106,7 +76,7 @@ void __init init_IRQ(void)
106 * @handler: called from auto vector interrupts 76 * @handler: called from auto vector interrupts
107 * 77 *
108 * setup the handler to be called from auto vector interrupts instead of the 78 * setup the handler to be called from auto vector interrupts instead of the
109 * standard __m68k_handle_int(), it will be called with irq numbers in the range 79 * standard do_IRQ(), it will be called with irq numbers in the range
110 * from IRQ_AUTO_1 - IRQ_AUTO_7. 80 * from IRQ_AUTO_1 - IRQ_AUTO_7.
111 */ 81 */
112void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *)) 82void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *))
@@ -120,217 +90,49 @@ void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_re
120 * m68k_setup_user_interrupt 90 * m68k_setup_user_interrupt
121 * @vec: first user vector interrupt to handle 91 * @vec: first user vector interrupt to handle
122 * @cnt: number of active user vector interrupts 92 * @cnt: number of active user vector interrupts
123 * @handler: called from user vector interrupts
124 * 93 *
125 * setup user vector interrupts, this includes activating the specified range 94 * setup user vector interrupts, this includes activating the specified range
126 * of interrupts, only then these interrupts can be requested (note: this is 95 * of interrupts, only then these interrupts can be requested (note: this is
127 * different from auto vector interrupts). An optional handler can be installed 96 * different from auto vector interrupts).
128 * to be called instead of the default __m68k_handle_int(), it will be called
129 * with irq numbers starting from IRQ_USER.
130 */ 97 */
131void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt, 98void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt)
132 void (*handler)(unsigned int, struct pt_regs *))
133{ 99{
134 int i; 100 int i;
135 101
136 BUG_ON(IRQ_USER + cnt > NR_IRQS); 102 BUG_ON(IRQ_USER + cnt > NR_IRQS);
137 m68k_first_user_vec = vec; 103 m68k_first_user_vec = vec;
138 for (i = 0; i < cnt; i++) 104 for (i = 0; i < cnt; i++)
139 irq_controller[IRQ_USER + i] = &user_irq_controller; 105 irq_set_chip(IRQ_USER + i, &user_irq_chip);
140 *user_irqvec_fixup = vec - IRQ_USER; 106 *user_irqvec_fixup = vec - IRQ_USER;
141 if (handler)
142 *user_irqhandler_fixup = (u32)handler;
143 flush_icache(); 107 flush_icache();
144} 108}
145 109
146/** 110/**
147 * m68k_setup_irq_controller 111 * m68k_setup_irq_controller
148 * @contr: irq controller which controls specified irq 112 * @chip: irq chip which controls specified irq
113 * @handle: flow handler which handles specified irq
149 * @irq: first irq to be managed by the controller 114 * @irq: first irq to be managed by the controller
115 * @cnt: number of irqs to be managed by the controller
150 * 116 *
151 * Change the controller for the specified range of irq, which will be used to 117 * Change the controller for the specified range of irq, which will be used to
152 * manage these irq. auto/user irq already have a default controller, which can 118 * manage these irq. auto/user irq already have a default controller, which can
153 * be changed as well, but the controller probably should use m68k_irq_startup/ 119 * be changed as well, but the controller probably should use m68k_irq_startup/
154 * m68k_irq_shutdown. 120 * m68k_irq_shutdown.
155 */ 121 */
156void m68k_setup_irq_controller(struct irq_controller *contr, unsigned int irq, 122void m68k_setup_irq_controller(struct irq_chip *chip,
123 irq_flow_handler_t handle, unsigned int irq,
157 unsigned int cnt) 124 unsigned int cnt)
158{ 125{
159 int i; 126 int i;
160 127
161 for (i = 0; i < cnt; i++) 128 for (i = 0; i < cnt; i++) {
162 irq_controller[irq + i] = contr; 129 irq_set_chip(irq + i, chip);
163} 130 if (handle)
164 131 irq_set_handler(irq + i, handle);
165irq_node_t *new_irq_node(void)
166{
167 irq_node_t *node;
168 short i;
169
170 for (node = nodes, i = NUM_IRQ_NODES-1; i >= 0; node++, i--) {
171 if (!node->handler) {
172 memset(node, 0, sizeof(*node));
173 return node;
174 }
175 } 132 }
176
177 printk ("new_irq_node: out of nodes\n");
178 return NULL;
179} 133}
180 134
181int setup_irq(unsigned int irq, struct irq_node *node) 135unsigned int m68k_irq_startup_irq(unsigned int irq)
182{
183 struct irq_controller *contr;
184 struct irq_node **prev;
185 unsigned long flags;
186
187 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
188 printk("%s: Incorrect IRQ %d from %s\n",
189 __func__, irq, node->devname);
190 return -ENXIO;
191 }
192
193 spin_lock_irqsave(&contr->lock, flags);
194
195 prev = irq_list + irq;
196 if (*prev) {
197 /* Can't share interrupts unless both agree to */
198 if (!((*prev)->flags & node->flags & IRQF_SHARED)) {
199 spin_unlock_irqrestore(&contr->lock, flags);
200 return -EBUSY;
201 }
202 while (*prev)
203 prev = &(*prev)->next;
204 }
205
206 if (!irq_list[irq]) {
207 if (contr->startup)
208 contr->startup(irq);
209 else
210 contr->enable(irq);
211 }
212 node->next = NULL;
213 *prev = node;
214
215 spin_unlock_irqrestore(&contr->lock, flags);
216
217 return 0;
218}
219
220int request_irq(unsigned int irq,
221 irq_handler_t handler,
222 unsigned long flags, const char *devname, void *dev_id)
223{
224 struct irq_node *node;
225 int res;
226
227 node = new_irq_node();
228 if (!node)
229 return -ENOMEM;
230
231 node->handler = handler;
232 node->flags = flags;
233 node->dev_id = dev_id;
234 node->devname = devname;
235
236 res = setup_irq(irq, node);
237 if (res)
238 node->handler = NULL;
239
240 return res;
241}
242
243EXPORT_SYMBOL(request_irq);
244
245void free_irq(unsigned int irq, void *dev_id)
246{
247 struct irq_controller *contr;
248 struct irq_node **p, *node;
249 unsigned long flags;
250
251 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
252 printk("%s: Incorrect IRQ %d\n", __func__, irq);
253 return;
254 }
255
256 spin_lock_irqsave(&contr->lock, flags);
257
258 p = irq_list + irq;
259 while ((node = *p)) {
260 if (node->dev_id == dev_id)
261 break;
262 p = &node->next;
263 }
264
265 if (node) {
266 *p = node->next;
267 node->handler = NULL;
268 } else
269 printk("%s: Removing probably wrong IRQ %d\n",
270 __func__, irq);
271
272 if (!irq_list[irq]) {
273 if (contr->shutdown)
274 contr->shutdown(irq);
275 else
276 contr->disable(irq);
277 }
278
279 spin_unlock_irqrestore(&contr->lock, flags);
280}
281
282EXPORT_SYMBOL(free_irq);
283
284void enable_irq(unsigned int irq)
285{
286 struct irq_controller *contr;
287 unsigned long flags;
288
289 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
290 printk("%s: Incorrect IRQ %d\n",
291 __func__, irq);
292 return;
293 }
294
295 spin_lock_irqsave(&contr->lock, flags);
296 if (irq_depth[irq]) {
297 if (!--irq_depth[irq]) {
298 if (contr->enable)
299 contr->enable(irq);
300 }
301 } else
302 WARN_ON(1);
303 spin_unlock_irqrestore(&contr->lock, flags);
304}
305
306EXPORT_SYMBOL(enable_irq);
307
308void disable_irq(unsigned int irq)
309{
310 struct irq_controller *contr;
311 unsigned long flags;
312
313 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
314 printk("%s: Incorrect IRQ %d\n",
315 __func__, irq);
316 return;
317 }
318
319 spin_lock_irqsave(&contr->lock, flags);
320 if (!irq_depth[irq]++) {
321 if (contr->disable)
322 contr->disable(irq);
323 }
324 spin_unlock_irqrestore(&contr->lock, flags);
325}
326
327EXPORT_SYMBOL(disable_irq);
328
329void disable_irq_nosync(unsigned int irq) __attribute__((alias("disable_irq")));
330
331EXPORT_SYMBOL(disable_irq_nosync);
332
333int m68k_irq_startup(unsigned int irq)
334{ 136{
335 if (irq <= IRQ_AUTO_7) 137 if (irq <= IRQ_AUTO_7)
336 vectors[VEC_SPUR + irq] = auto_inthandler; 138 vectors[VEC_SPUR + irq] = auto_inthandler;
@@ -339,41 +141,21 @@ int m68k_irq_startup(unsigned int irq)
339 return 0; 141 return 0;
340} 142}
341 143
342void m68k_irq_shutdown(unsigned int irq) 144unsigned int m68k_irq_startup(struct irq_data *data)
343{ 145{
344 if (irq <= IRQ_AUTO_7) 146 return m68k_irq_startup_irq(data->irq);
345 vectors[VEC_SPUR + irq] = bad_inthandler;
346 else
347 vectors[m68k_first_user_vec + irq - IRQ_USER] = bad_inthandler;
348} 147}
349 148
350 149void m68k_irq_shutdown(struct irq_data *data)
351/*
352 * Do we need these probe functions on the m68k?
353 *
354 * ... may be useful with ISA devices
355 */
356unsigned long probe_irq_on (void)
357{ 150{
358#ifdef CONFIG_Q40 151 unsigned int irq = data->irq;
359 if (MACH_IS_Q40)
360 return q40_probe_irq_on();
361#endif
362 return 0;
363}
364 152
365EXPORT_SYMBOL(probe_irq_on); 153 if (irq <= IRQ_AUTO_7)
366 154 vectors[VEC_SPUR + irq] = bad_inthandler;
367int probe_irq_off (unsigned long irqs) 155 else
368{ 156 vectors[m68k_first_user_vec + irq - IRQ_USER] = bad_inthandler;
369#ifdef CONFIG_Q40
370 if (MACH_IS_Q40)
371 return q40_probe_irq_off(irqs);
372#endif
373 return 0;
374} 157}
375 158
376EXPORT_SYMBOL(probe_irq_off);
377 159
378unsigned int irq_canonicalize(unsigned int irq) 160unsigned int irq_canonicalize(unsigned int irq)
379{ 161{
@@ -386,52 +168,9 @@ unsigned int irq_canonicalize(unsigned int irq)
386 168
387EXPORT_SYMBOL(irq_canonicalize); 169EXPORT_SYMBOL(irq_canonicalize);
388 170
389asmlinkage void m68k_handle_int(unsigned int irq)
390{
391 struct irq_node *node;
392 kstat_cpu(0).irqs[irq]++;
393 node = irq_list[irq];
394 do {
395 node->handler(irq, node->dev_id);
396 node = node->next;
397 } while (node);
398}
399
400asmlinkage void __m68k_handle_int(unsigned int irq, struct pt_regs *regs)
401{
402 struct pt_regs *old_regs;
403 old_regs = set_irq_regs(regs);
404 m68k_handle_int(irq);
405 set_irq_regs(old_regs);
406}
407 171
408asmlinkage void handle_badint(struct pt_regs *regs) 172asmlinkage void handle_badint(struct pt_regs *regs)
409{ 173{
410 kstat_cpu(0).irqs[0]++; 174 atomic_inc(&irq_err_count);
411 printk("unexpected interrupt from %u\n", regs->vector); 175 pr_warn("unexpected interrupt from %u\n", regs->vector);
412}
413
414int show_interrupts(struct seq_file *p, void *v)
415{
416 struct irq_controller *contr;
417 struct irq_node *node;
418 int i = *(loff_t *) v;
419
420 /* autovector interrupts */
421 if (irq_list[i]) {
422 contr = irq_controller[i];
423 node = irq_list[i];
424 seq_printf(p, "%-8s %3u: %10u %s", contr->name, i, kstat_cpu(0).irqs[i], node->devname);
425 while ((node = node->next))
426 seq_printf(p, ", %s", node->devname);
427 seq_puts(p, "\n");
428 }
429 return 0;
430}
431
432#ifdef CONFIG_PROC_FS
433void init_irq_proc(void)
434{
435 /* Insert /proc/irq driver here */
436} 176}
437#endif
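
Note on the ints.c hunk above: it retires the m68k-private irq_controller/irq_node layer, so request_irq(), free_irq(), enable_irq(), disable_irq() and /proc/interrupts now come from the generic kernel/irq core, and a platform only describes its hardware as a struct irq_chip plus a flow handler. Below is a minimal sketch of the new registration path using the m68k_setup_irq_controller() and m68k_setup_user_interrupt() helpers shown in the patch; the "foo" chip, its callbacks and the IRQ count are placeholders, not code from the patch.

    #include <linux/irq.h>
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <asm/irq.h>
    #include <asm/traps.h>

    static void foo_irq_mask(struct irq_data *data)
    {
            /* a real chip would write an interrupt-enable register here */
            pr_debug("foo: mask irq %u\n", data->irq);
    }

    static void foo_irq_unmask(struct irq_data *data)
    {
            pr_debug("foo: unmask irq %u\n", data->irq);
    }

    static struct irq_chip foo_irq_chip = {
            .name       = "foo",
            .irq_mask   = foo_irq_mask,
            .irq_unmask = foo_irq_unmask,
    };

    void __init foo_init_IRQ(void)
    {
            /* chip + flow handler for a range of user vectors, as mac/q40/sun3 do */
            m68k_setup_irq_controller(&foo_irq_chip, handle_level_irq,
                                      IRQ_USER, 32);
            m68k_setup_user_interrupt(VEC_USER, 32);
    }
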
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index c468f2edaa85..ce827b376110 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -365,4 +365,6 @@ ENTRY(sys_call_table)
365 .long sys_clock_adjtime 365 .long sys_clock_adjtime
366 .long sys_syncfs 366 .long sys_syncfs
367 .long sys_setns 367 .long sys_setns
368 .long sys_process_vm_readv /* 345 */
369 .long sys_process_vm_writev
368 370
diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c
index 2a96bebd8969..b403924a1cad 100644
--- a/arch/m68k/mac/baboon.c
+++ b/arch/m68k/mac/baboon.c
@@ -11,6 +11,7 @@
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/irq.h>
14 15
15#include <asm/traps.h> 16#include <asm/traps.h>
16#include <asm/bootinfo.h> 17#include <asm/bootinfo.h>
@@ -20,9 +21,6 @@
20 21
21/* #define DEBUG_IRQS */ 22/* #define DEBUG_IRQS */
22 23
23extern void mac_enable_irq(unsigned int);
24extern void mac_disable_irq(unsigned int);
25
26int baboon_present; 24int baboon_present;
27static volatile struct baboon *baboon; 25static volatile struct baboon *baboon;
28static unsigned char baboon_disabled; 26static unsigned char baboon_disabled;
@@ -53,7 +51,7 @@ void __init baboon_init(void)
53 * Baboon interrupt handler. This works a lot like a VIA. 51 * Baboon interrupt handler. This works a lot like a VIA.
54 */ 52 */
55 53
56static irqreturn_t baboon_irq(int irq, void *dev_id) 54static void baboon_irq(unsigned int irq, struct irq_desc *desc)
57{ 55{
58 int irq_bit, irq_num; 56 int irq_bit, irq_num;
59 unsigned char events; 57 unsigned char events;
@@ -64,15 +62,16 @@ static irqreturn_t baboon_irq(int irq, void *dev_id)
64 (uint) baboon->mb_status); 62 (uint) baboon->mb_status);
65#endif 63#endif
66 64
67 if (!(events = baboon->mb_ifr & 0x07)) 65 events = baboon->mb_ifr & 0x07;
68 return IRQ_NONE; 66 if (!events)
67 return;
69 68
70 irq_num = IRQ_BABOON_0; 69 irq_num = IRQ_BABOON_0;
71 irq_bit = 1; 70 irq_bit = 1;
72 do { 71 do {
73 if (events & irq_bit) { 72 if (events & irq_bit) {
74 baboon->mb_ifr &= ~irq_bit; 73 baboon->mb_ifr &= ~irq_bit;
75 m68k_handle_int(irq_num); 74 generic_handle_irq(irq_num);
76 } 75 }
77 irq_bit <<= 1; 76 irq_bit <<= 1;
78 irq_num++; 77 irq_num++;
@@ -82,7 +81,6 @@ static irqreturn_t baboon_irq(int irq, void *dev_id)
82 /* for now we need to smash all interrupts */ 81 /* for now we need to smash all interrupts */
83 baboon->mb_ifr &= ~events; 82 baboon->mb_ifr &= ~events;
84#endif 83#endif
85 return IRQ_HANDLED;
86} 84}
87 85
88/* 86/*
@@ -92,8 +90,7 @@ static irqreturn_t baboon_irq(int irq, void *dev_id)
92void __init baboon_register_interrupts(void) 90void __init baboon_register_interrupts(void)
93{ 91{
94 baboon_disabled = 0; 92 baboon_disabled = 0;
95 if (request_irq(IRQ_NUBUS_C, baboon_irq, 0, "baboon", (void *)baboon)) 93 irq_set_chained_handler(IRQ_NUBUS_C, baboon_irq);
96 pr_err("Couldn't register baboon interrupt\n");
97} 94}
98 95
99/* 96/*
@@ -111,7 +108,7 @@ void baboon_irq_enable(int irq)
111 108
112 baboon_disabled &= ~(1 << irq_idx); 109 baboon_disabled &= ~(1 << irq_idx);
113 if (!baboon_disabled) 110 if (!baboon_disabled)
114 mac_enable_irq(IRQ_NUBUS_C); 111 mac_irq_enable(irq_get_irq_data(IRQ_NUBUS_C));
115} 112}
116 113
117void baboon_irq_disable(int irq) 114void baboon_irq_disable(int irq)
@@ -124,7 +121,7 @@ void baboon_irq_disable(int irq)
124 121
125 baboon_disabled |= 1 << irq_idx; 122 baboon_disabled |= 1 << irq_idx;
126 if (baboon_disabled) 123 if (baboon_disabled)
127 mac_disable_irq(IRQ_NUBUS_C); 124 mac_irq_disable(irq_get_irq_data(IRQ_NUBUS_C));
128} 125}
129 126
130void baboon_irq_clear(int irq) 127void baboon_irq_clear(int irq)
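
The baboon conversion above is the recurring pattern in the mac changes: a cascaded interrupt is no longer claimed with request_irq() and an irqreturn_t dispatcher, it is installed with irq_set_chained_handler() and demultiplexed into generic_handle_irq(). A stripped-down sketch of that shape follows; the status byte, the parent IRQ and the child IRQ base are invented placeholders, not real mac definitions.

    #include <linux/irq.h>
    #include <linux/init.h>

    #define DEMO_IRQ_CASCADE  10    /* parent line, placeholder value */
    #define DEMO_IRQ_CHILD0   64    /* first demuxed child, placeholder value */

    static volatile unsigned char demo_ifr;   /* stands in for a device register */

    static void demo_cascade_irq(unsigned int irq, struct irq_desc *desc)
    {
            unsigned char events = demo_ifr & 0x07;
            unsigned int child = DEMO_IRQ_CHILD0;

            while (events) {
                    if (events & 1) {
                            /* ack the source bit, then run the child's handler */
                            demo_ifr &= ~(1 << (child - DEMO_IRQ_CHILD0));
                            generic_handle_irq(child);
                    }
                    events >>= 1;
                    child++;
            }
    }

    static void __init demo_register_cascade(void)
    {
            irq_set_chained_handler(DEMO_IRQ_CASCADE, demo_cascade_irq);
    }
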
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index 1ad4e9d80eba..a5462cc0bfd6 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -305,15 +305,13 @@ void __init iop_register_interrupts(void)
305{ 305{
306 if (iop_ism_present) { 306 if (iop_ism_present) {
307 if (oss_present) { 307 if (oss_present) {
308 if (request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq, 308 if (request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq, 0,
309 IRQ_FLG_LOCK, "ISM IOP", 309 "ISM IOP", (void *)IOP_NUM_ISM))
310 (void *) IOP_NUM_ISM))
311 pr_err("Couldn't register ISM IOP interrupt\n"); 310 pr_err("Couldn't register ISM IOP interrupt\n");
312 oss_irq_enable(IRQ_MAC_ADB); 311 oss_irq_enable(IRQ_MAC_ADB);
313 } else { 312 } else {
314 if (request_irq(IRQ_VIA2_0, iop_ism_irq, 313 if (request_irq(IRQ_VIA2_0, iop_ism_irq, 0, "ISM IOP",
315 IRQ_FLG_LOCK|IRQ_FLG_FAST, "ISM IOP", 314 (void *)IOP_NUM_ISM))
316 (void *) IOP_NUM_ISM))
317 pr_err("Couldn't register ISM IOP interrupt\n"); 315 pr_err("Couldn't register ISM IOP interrupt\n");
318 } 316 }
319 if (!iop_alive(iop_base[IOP_NUM_ISM])) { 317 if (!iop_alive(iop_base[IOP_NUM_ISM])) {
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
index f92190c159b4..ba220b70ab8c 100644
--- a/arch/m68k/mac/macints.c
+++ b/arch/m68k/mac/macints.c
@@ -190,14 +190,10 @@ irqreturn_t mac_debug_handler(int, void *);
190 190
191/* #define DEBUG_MACINTS */ 191/* #define DEBUG_MACINTS */
192 192
193void mac_enable_irq(unsigned int irq); 193static struct irq_chip mac_irq_chip = {
194void mac_disable_irq(unsigned int irq);
195
196static struct irq_controller mac_irq_controller = {
197 .name = "mac", 194 .name = "mac",
198 .lock = __SPIN_LOCK_UNLOCKED(mac_irq_controller.lock), 195 .irq_enable = mac_irq_enable,
199 .enable = mac_enable_irq, 196 .irq_disable = mac_irq_disable,
200 .disable = mac_disable_irq,
201}; 197};
202 198
203void __init mac_init_IRQ(void) 199void __init mac_init_IRQ(void)
@@ -205,7 +201,7 @@ void __init mac_init_IRQ(void)
205#ifdef DEBUG_MACINTS 201#ifdef DEBUG_MACINTS
206 printk("mac_init_IRQ(): Setting things up...\n"); 202 printk("mac_init_IRQ(): Setting things up...\n");
207#endif 203#endif
208 m68k_setup_irq_controller(&mac_irq_controller, IRQ_USER, 204 m68k_setup_irq_controller(&mac_irq_chip, handle_simple_irq, IRQ_USER,
209 NUM_MAC_SOURCES - IRQ_USER); 205 NUM_MAC_SOURCES - IRQ_USER);
210 /* Make sure the SONIC interrupt is cleared or things get ugly */ 206 /* Make sure the SONIC interrupt is cleared or things get ugly */
211#ifdef SHUTUP_SONIC 207#ifdef SHUTUP_SONIC
@@ -241,16 +237,17 @@ void __init mac_init_IRQ(void)
241} 237}
242 238
243/* 239/*
244 * mac_enable_irq - enable an interrupt source 240 * mac_irq_enable - enable an interrupt source
245 * mac_disable_irq - disable an interrupt source 241 * mac_irq_disable - disable an interrupt source
246 * mac_clear_irq - clears a pending interrupt 242 * mac_clear_irq - clears a pending interrupt
247 * mac_pending_irq - Returns the pending status of an IRQ (nonzero = pending) 243 * mac_irq_pending - returns the pending status of an IRQ (nonzero = pending)
248 * 244 *
249 * These routines are just dispatchers to the VIA/OSS/PSC routines. 245 * These routines are just dispatchers to the VIA/OSS/PSC routines.
250 */ 246 */
251 247
252void mac_enable_irq(unsigned int irq) 248void mac_irq_enable(struct irq_data *data)
253{ 249{
250 int irq = data->irq;
254 int irq_src = IRQ_SRC(irq); 251 int irq_src = IRQ_SRC(irq);
255 252
256 switch(irq_src) { 253 switch(irq_src) {
@@ -283,8 +280,9 @@ void mac_enable_irq(unsigned int irq)
283 } 280 }
284} 281}
285 282
286void mac_disable_irq(unsigned int irq) 283void mac_irq_disable(struct irq_data *data)
287{ 284{
285 int irq = data->irq;
288 int irq_src = IRQ_SRC(irq); 286 int irq_src = IRQ_SRC(irq);
289 287
290 switch(irq_src) { 288 switch(irq_src) {
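
In macints.c the enable/disable entry points become irq_chip callbacks: they take a struct irq_data and read the interrupt number from data->irq, and code outside the irq core (baboon.c above, for instance) reaches them through irq_get_irq_data(). A short sketch of both sides, with made-up names; a real implementation would dispatch on IRQ_SRC(irq) and touch hardware.

    #include <linux/irq.h>
    #include <linux/kernel.h>

    static void demo_irq_enable(struct irq_data *data)
    {
            unsigned int irq = data->irq;

            pr_debug("demo: enable irq %u\n", irq);
    }

    /* callers outside the irq core hand the callback its irq_data explicitly */
    static void demo_enable_parent(unsigned int parent_irq)
    {
            demo_irq_enable(irq_get_irq_data(parent_irq));
    }
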
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index a9c0f5ab4cc0..a4c82dab9ff1 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -19,6 +19,7 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/irq.h>
22 23
23#include <asm/bootinfo.h> 24#include <asm/bootinfo.h>
24#include <asm/macintosh.h> 25#include <asm/macintosh.h>
@@ -29,10 +30,7 @@
29int oss_present; 30int oss_present;
30volatile struct mac_oss *oss; 31volatile struct mac_oss *oss;
31 32
32static irqreturn_t oss_irq(int, void *); 33extern void via1_irq(unsigned int irq, struct irq_desc *desc);
33static irqreturn_t oss_nubus_irq(int, void *);
34
35extern irqreturn_t via1_irq(int, void *);
36 34
37/* 35/*
38 * Initialize the OSS 36 * Initialize the OSS
@@ -60,26 +58,6 @@ void __init oss_init(void)
60} 58}
61 59
62/* 60/*
63 * Register the OSS and NuBus interrupt dispatchers.
64 */
65
66void __init oss_register_interrupts(void)
67{
68 if (request_irq(OSS_IRQLEV_SCSI, oss_irq, IRQ_FLG_LOCK,
69 "scsi", (void *) oss))
70 pr_err("Couldn't register %s interrupt\n", "scsi");
71 if (request_irq(OSS_IRQLEV_NUBUS, oss_nubus_irq, IRQ_FLG_LOCK,
72 "nubus", (void *) oss))
73 pr_err("Couldn't register %s interrupt\n", "nubus");
74 if (request_irq(OSS_IRQLEV_SOUND, oss_irq, IRQ_FLG_LOCK,
75 "sound", (void *) oss))
76 pr_err("Couldn't register %s interrupt\n", "sound");
77 if (request_irq(OSS_IRQLEV_VIA1, via1_irq, IRQ_FLG_LOCK,
78 "via1", (void *) via1))
79 pr_err("Couldn't register %s interrupt\n", "via1");
80}
81
82/*
83 * Initialize OSS for Nubus access 61 * Initialize OSS for Nubus access
84 */ 62 */
85 63
@@ -92,17 +70,17 @@ void __init oss_nubus_init(void)
92 * and SCSI; everything else is routed to its own autovector IRQ. 70 * and SCSI; everything else is routed to its own autovector IRQ.
93 */ 71 */
94 72
95static irqreturn_t oss_irq(int irq, void *dev_id) 73static void oss_irq(unsigned int irq, struct irq_desc *desc)
96{ 74{
97 int events; 75 int events;
98 76
99 events = oss->irq_pending & (OSS_IP_SOUND|OSS_IP_SCSI); 77 events = oss->irq_pending & (OSS_IP_SOUND|OSS_IP_SCSI);
100 if (!events) 78 if (!events)
101 return IRQ_NONE; 79 return;
102 80
103#ifdef DEBUG_IRQS 81#ifdef DEBUG_IRQS
104 if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) { 82 if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) {
105 printk("oss_irq: irq %d events = 0x%04X\n", irq, 83 printk("oss_irq: irq %u events = 0x%04X\n", irq,
106 (int) oss->irq_pending); 84 (int) oss->irq_pending);
107 } 85 }
108#endif 86#endif
@@ -113,11 +91,10 @@ static irqreturn_t oss_irq(int irq, void *dev_id)
113 /* FIXME: call sound handler */ 91 /* FIXME: call sound handler */
114 } else if (events & OSS_IP_SCSI) { 92 } else if (events & OSS_IP_SCSI) {
115 oss->irq_pending &= ~OSS_IP_SCSI; 93 oss->irq_pending &= ~OSS_IP_SCSI;
116 m68k_handle_int(IRQ_MAC_SCSI); 94 generic_handle_irq(IRQ_MAC_SCSI);
117 } else { 95 } else {
118 /* FIXME: error check here? */ 96 /* FIXME: error check here? */
119 } 97 }
120 return IRQ_HANDLED;
121} 98}
122 99
123/* 100/*
@@ -126,13 +103,13 @@ static irqreturn_t oss_irq(int irq, void *dev_id)
126 * Unlike the VIA/RBV this is on its own autovector interrupt level. 103 * Unlike the VIA/RBV this is on its own autovector interrupt level.
127 */ 104 */
128 105
129static irqreturn_t oss_nubus_irq(int irq, void *dev_id) 106static void oss_nubus_irq(unsigned int irq, struct irq_desc *desc)
130{ 107{
131 int events, irq_bit, i; 108 int events, irq_bit, i;
132 109
133 events = oss->irq_pending & OSS_IP_NUBUS; 110 events = oss->irq_pending & OSS_IP_NUBUS;
134 if (!events) 111 if (!events)
135 return IRQ_NONE; 112 return;
136 113
137#ifdef DEBUG_NUBUS_INT 114#ifdef DEBUG_NUBUS_INT
138 if (console_loglevel > 7) { 115 if (console_loglevel > 7) {
@@ -148,10 +125,21 @@ static irqreturn_t oss_nubus_irq(int irq, void *dev_id)
148 irq_bit >>= 1; 125 irq_bit >>= 1;
149 if (events & irq_bit) { 126 if (events & irq_bit) {
150 oss->irq_pending &= ~irq_bit; 127 oss->irq_pending &= ~irq_bit;
151 m68k_handle_int(NUBUS_SOURCE_BASE + i); 128 generic_handle_irq(NUBUS_SOURCE_BASE + i);
152 } 129 }
153 } while(events & (irq_bit - 1)); 130 } while(events & (irq_bit - 1));
154 return IRQ_HANDLED; 131}
132
133/*
134 * Register the OSS and NuBus interrupt dispatchers.
135 */
136
137void __init oss_register_interrupts(void)
138{
139 irq_set_chained_handler(OSS_IRQLEV_SCSI, oss_irq);
140 irq_set_chained_handler(OSS_IRQLEV_NUBUS, oss_nubus_irq);
141 irq_set_chained_handler(OSS_IRQLEV_SOUND, oss_irq);
142 irq_set_chained_handler(OSS_IRQLEV_VIA1, via1_irq);
155} 143}
156 144
157/* 145/*
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index a4c3eb60706e..e6c2d20f328d 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -18,6 +18,7 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/irq.h>
21 22
22#include <asm/traps.h> 23#include <asm/traps.h>
23#include <asm/bootinfo.h> 24#include <asm/bootinfo.h>
@@ -30,8 +31,6 @@
30int psc_present; 31int psc_present;
31volatile __u8 *psc; 32volatile __u8 *psc;
32 33
33irqreturn_t psc_irq(int, void *);
34
35/* 34/*
36 * Debugging dump, used in various places to see what's going on. 35 * Debugging dump, used in various places to see what's going on.
37 */ 36 */
@@ -112,52 +111,52 @@ void __init psc_init(void)
112} 111}
113 112
114/* 113/*
115 * Register the PSC interrupt dispatchers for autovector interrupts 3-6.
116 */
117
118void __init psc_register_interrupts(void)
119{
120 if (request_irq(IRQ_AUTO_3, psc_irq, 0, "psc3", (void *) 0x30))
121 pr_err("Couldn't register psc%d interrupt\n", 3);
122 if (request_irq(IRQ_AUTO_4, psc_irq, 0, "psc4", (void *) 0x40))
123 pr_err("Couldn't register psc%d interrupt\n", 4);
124 if (request_irq(IRQ_AUTO_5, psc_irq, 0, "psc5", (void *) 0x50))
125 pr_err("Couldn't register psc%d interrupt\n", 5);
126 if (request_irq(IRQ_AUTO_6, psc_irq, 0, "psc6", (void *) 0x60))
127 pr_err("Couldn't register psc%d interrupt\n", 6);
128}
129
130/*
131 * PSC interrupt handler. It's a lot like the VIA interrupt handler. 114 * PSC interrupt handler. It's a lot like the VIA interrupt handler.
132 */ 115 */
133 116
134irqreturn_t psc_irq(int irq, void *dev_id) 117static void psc_irq(unsigned int irq, struct irq_desc *desc)
135{ 118{
136 int pIFR = pIFRbase + ((int) dev_id); 119 unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc);
137 int pIER = pIERbase + ((int) dev_id); 120 int pIFR = pIFRbase + offset;
121 int pIER = pIERbase + offset;
138 int irq_num; 122 int irq_num;
139 unsigned char irq_bit, events; 123 unsigned char irq_bit, events;
140 124
141#ifdef DEBUG_IRQS 125#ifdef DEBUG_IRQS
142 printk("psc_irq: irq %d pIFR = 0x%02X pIER = 0x%02X\n", 126 printk("psc_irq: irq %u pIFR = 0x%02X pIER = 0x%02X\n",
143 irq, (int) psc_read_byte(pIFR), (int) psc_read_byte(pIER)); 127 irq, (int) psc_read_byte(pIFR), (int) psc_read_byte(pIER));
144#endif 128#endif
145 129
146 events = psc_read_byte(pIFR) & psc_read_byte(pIER) & 0xF; 130 events = psc_read_byte(pIFR) & psc_read_byte(pIER) & 0xF;
147 if (!events) 131 if (!events)
148 return IRQ_NONE; 132 return;
149 133
150 irq_num = irq << 3; 134 irq_num = irq << 3;
151 irq_bit = 1; 135 irq_bit = 1;
152 do { 136 do {
153 if (events & irq_bit) { 137 if (events & irq_bit) {
154 psc_write_byte(pIFR, irq_bit); 138 psc_write_byte(pIFR, irq_bit);
155 m68k_handle_int(irq_num); 139 generic_handle_irq(irq_num);
156 } 140 }
157 irq_num++; 141 irq_num++;
158 irq_bit <<= 1; 142 irq_bit <<= 1;
159 } while (events >= irq_bit); 143 } while (events >= irq_bit);
160 return IRQ_HANDLED; 144}
145
146/*
147 * Register the PSC interrupt dispatchers for autovector interrupts 3-6.
148 */
149
150void __init psc_register_interrupts(void)
151{
152 irq_set_chained_handler(IRQ_AUTO_3, psc_irq);
153 irq_set_handler_data(IRQ_AUTO_3, (void *)0x30);
154 irq_set_chained_handler(IRQ_AUTO_4, psc_irq);
155 irq_set_handler_data(IRQ_AUTO_4, (void *)0x40);
156 irq_set_chained_handler(IRQ_AUTO_5, psc_irq);
157 irq_set_handler_data(IRQ_AUTO_5, (void *)0x50);
158 irq_set_chained_handler(IRQ_AUTO_6, psc_irq);
159 irq_set_handler_data(IRQ_AUTO_6, (void *)0x60);
161} 160}
162 161
163void psc_irq_enable(int irq) { 162void psc_irq_enable(int irq) {
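
psc.c shows where per-cascade private data goes after the conversion: what used to ride in the dev_id argument of request_irq() is now attached with irq_set_handler_data() and fetched inside the flow handler with irq_desc_get_handler_data(). A minimal sketch of that pairing; the IRQ number and the 0x30 offset mirror the patch, the function names are invented.

    #include <linux/irq.h>
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <asm/irq.h>

    static void demo_psc_cascade(unsigned int irq, struct irq_desc *desc)
    {
            /* the value stored at registration time comes back here */
            unsigned int offset = (unsigned int)(unsigned long)
                                  irq_desc_get_handler_data(desc);

            pr_debug("cascade %u: register bank offset 0x%x\n", irq, offset);
    }

    static void __init demo_psc_register(void)
    {
            irq_set_chained_handler(IRQ_AUTO_3, demo_psc_cascade);
            irq_set_handler_data(IRQ_AUTO_3, (void *)0x30);
    }
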
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index e71166daec6a..f1600ad26621 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -28,6 +28,7 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/irq.h>
31 32
32#include <asm/bootinfo.h> 33#include <asm/bootinfo.h>
33#include <asm/macintosh.h> 34#include <asm/macintosh.h>
@@ -77,9 +78,6 @@ static int gIER,gIFR,gBufA,gBufB;
77static u8 nubus_disabled; 78static u8 nubus_disabled;
78 79
79void via_debug_dump(void); 80void via_debug_dump(void);
80irqreturn_t via1_irq(int, void *);
81irqreturn_t via2_irq(int, void *);
82irqreturn_t via_nubus_irq(int, void *);
83void via_irq_enable(int irq); 81void via_irq_enable(int irq);
84void via_irq_disable(int irq); 82void via_irq_disable(int irq);
85void via_irq_clear(int irq); 83void via_irq_clear(int irq);
@@ -281,40 +279,11 @@ void __init via_init_clock(irq_handler_t func)
281 via1[vT1CL] = MAC_CLOCK_LOW; 279 via1[vT1CL] = MAC_CLOCK_LOW;
282 via1[vT1CH] = MAC_CLOCK_HIGH; 280 via1[vT1CH] = MAC_CLOCK_HIGH;
283 281
284 if (request_irq(IRQ_MAC_TIMER_1, func, IRQ_FLG_LOCK, "timer", func)) 282 if (request_irq(IRQ_MAC_TIMER_1, func, 0, "timer", func))
285 pr_err("Couldn't register %s interrupt\n", "timer"); 283 pr_err("Couldn't register %s interrupt\n", "timer");
286} 284}
287 285
288/* 286/*
289 * Register the interrupt dispatchers for VIA or RBV machines only.
290 */
291
292void __init via_register_interrupts(void)
293{
294 if (via_alt_mapping) {
295 if (request_irq(IRQ_AUTO_1, via1_irq,
296 IRQ_FLG_LOCK|IRQ_FLG_FAST, "software",
297 (void *) via1))
298 pr_err("Couldn't register %s interrupt\n", "software");
299 if (request_irq(IRQ_AUTO_6, via1_irq,
300 IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
301 (void *) via1))
302 pr_err("Couldn't register %s interrupt\n", "via1");
303 } else {
304 if (request_irq(IRQ_AUTO_1, via1_irq,
305 IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
306 (void *) via1))
307 pr_err("Couldn't register %s interrupt\n", "via1");
308 }
309 if (request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
310 "via2", (void *) via2))
311 pr_err("Couldn't register %s interrupt\n", "via2");
312 if (request_irq(IRQ_MAC_NUBUS, via_nubus_irq,
313 IRQ_FLG_LOCK|IRQ_FLG_FAST, "nubus", (void *) via2))
314 pr_err("Couldn't register %s interrupt\n", "nubus");
315}
316
317/*
318 * Debugging dump, used in various places to see what's going on. 287 * Debugging dump, used in various places to see what's going on.
319 */ 288 */
320 289
@@ -446,48 +415,46 @@ void __init via_nubus_init(void)
446 * via6522.c :-), disable/pending masks added. 415 * via6522.c :-), disable/pending masks added.
447 */ 416 */
448 417
449irqreturn_t via1_irq(int irq, void *dev_id) 418void via1_irq(unsigned int irq, struct irq_desc *desc)
450{ 419{
451 int irq_num; 420 int irq_num;
452 unsigned char irq_bit, events; 421 unsigned char irq_bit, events;
453 422
454 events = via1[vIFR] & via1[vIER] & 0x7F; 423 events = via1[vIFR] & via1[vIER] & 0x7F;
455 if (!events) 424 if (!events)
456 return IRQ_NONE; 425 return;
457 426
458 irq_num = VIA1_SOURCE_BASE; 427 irq_num = VIA1_SOURCE_BASE;
459 irq_bit = 1; 428 irq_bit = 1;
460 do { 429 do {
461 if (events & irq_bit) { 430 if (events & irq_bit) {
462 via1[vIFR] = irq_bit; 431 via1[vIFR] = irq_bit;
463 m68k_handle_int(irq_num); 432 generic_handle_irq(irq_num);
464 } 433 }
465 ++irq_num; 434 ++irq_num;
466 irq_bit <<= 1; 435 irq_bit <<= 1;
467 } while (events >= irq_bit); 436 } while (events >= irq_bit);
468 return IRQ_HANDLED;
469} 437}
470 438
471irqreturn_t via2_irq(int irq, void *dev_id) 439static void via2_irq(unsigned int irq, struct irq_desc *desc)
472{ 440{
473 int irq_num; 441 int irq_num;
474 unsigned char irq_bit, events; 442 unsigned char irq_bit, events;
475 443
476 events = via2[gIFR] & via2[gIER] & 0x7F; 444 events = via2[gIFR] & via2[gIER] & 0x7F;
477 if (!events) 445 if (!events)
478 return IRQ_NONE; 446 return;
479 447
480 irq_num = VIA2_SOURCE_BASE; 448 irq_num = VIA2_SOURCE_BASE;
481 irq_bit = 1; 449 irq_bit = 1;
482 do { 450 do {
483 if (events & irq_bit) { 451 if (events & irq_bit) {
484 via2[gIFR] = irq_bit | rbv_clear; 452 via2[gIFR] = irq_bit | rbv_clear;
485 m68k_handle_int(irq_num); 453 generic_handle_irq(irq_num);
486 } 454 }
487 ++irq_num; 455 ++irq_num;
488 irq_bit <<= 1; 456 irq_bit <<= 1;
489 } while (events >= irq_bit); 457 } while (events >= irq_bit);
490 return IRQ_HANDLED;
491} 458}
492 459
493/* 460/*
@@ -495,7 +462,7 @@ irqreturn_t via2_irq(int irq, void *dev_id)
495 * VIA2 dispatcher as a fast interrupt handler. 462 * VIA2 dispatcher as a fast interrupt handler.
496 */ 463 */
497 464
498irqreturn_t via_nubus_irq(int irq, void *dev_id) 465void via_nubus_irq(unsigned int irq, struct irq_desc *desc)
499{ 466{
500 int slot_irq; 467 int slot_irq;
501 unsigned char slot_bit, events; 468 unsigned char slot_bit, events;
@@ -506,7 +473,7 @@ irqreturn_t via_nubus_irq(int irq, void *dev_id)
506 else 473 else
507 events &= ~via2[vDirA]; 474 events &= ~via2[vDirA];
508 if (!events) 475 if (!events)
509 return IRQ_NONE; 476 return;
510 477
511 do { 478 do {
512 slot_irq = IRQ_NUBUS_F; 479 slot_irq = IRQ_NUBUS_F;
@@ -514,7 +481,7 @@ irqreturn_t via_nubus_irq(int irq, void *dev_id)
514 do { 481 do {
515 if (events & slot_bit) { 482 if (events & slot_bit) {
516 events &= ~slot_bit; 483 events &= ~slot_bit;
517 m68k_handle_int(slot_irq); 484 generic_handle_irq(slot_irq);
518 } 485 }
519 --slot_irq; 486 --slot_irq;
520 slot_bit >>= 1; 487 slot_bit >>= 1;
@@ -528,7 +495,24 @@ irqreturn_t via_nubus_irq(int irq, void *dev_id)
528 else 495 else
529 events &= ~via2[vDirA]; 496 events &= ~via2[vDirA];
530 } while (events); 497 } while (events);
531 return IRQ_HANDLED; 498}
499
500/*
501 * Register the interrupt dispatchers for VIA or RBV machines only.
502 */
503
504void __init via_register_interrupts(void)
505{
506 if (via_alt_mapping) {
507 /* software interrupt */
508 irq_set_chained_handler(IRQ_AUTO_1, via1_irq);
509 /* via1 interrupt */
510 irq_set_chained_handler(IRQ_AUTO_6, via1_irq);
511 } else {
512 irq_set_chained_handler(IRQ_AUTO_1, via1_irq);
513 }
514 irq_set_chained_handler(IRQ_AUTO_2, via2_irq);
515 irq_set_chained_handler(IRQ_MAC_NUBUS, via_nubus_irq);
532} 516}
533 517
534void via_irq_enable(int irq) { 518void via_irq_enable(int irq) {
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 6cb9c3a9b6c9..5de924ef42ed 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -81,7 +81,7 @@ static void mvme147_get_model(char *model)
81 81
82void __init mvme147_init_IRQ(void) 82void __init mvme147_init_IRQ(void)
83{ 83{
84 m68k_setup_user_interrupt(VEC_USER, 192, NULL); 84 m68k_setup_user_interrupt(VEC_USER, 192);
85} 85}
86 86
87void __init config_mvme147(void) 87void __init config_mvme147(void)
@@ -114,8 +114,7 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
114void mvme147_sched_init (irq_handler_t timer_routine) 114void mvme147_sched_init (irq_handler_t timer_routine)
115{ 115{
116 tick_handler = timer_routine; 116 tick_handler = timer_routine;
117 if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, IRQ_FLG_REPLACE, 117 if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, 0, "timer 1", NULL))
118 "timer 1", NULL))
119 pr_err("Couldn't register timer interrupt\n"); 118 pr_err("Couldn't register timer interrupt\n");
120 119
121 /* Init the clock with a value */ 120 /* Init the clock with a value */
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 0b28e2621653..31a66d99cbca 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -117,7 +117,7 @@ static void mvme16x_get_hardware_list(struct seq_file *m)
117 117
118static void __init mvme16x_init_IRQ (void) 118static void __init mvme16x_init_IRQ (void)
119{ 119{
120 m68k_setup_user_interrupt(VEC_USER, 192, NULL); 120 m68k_setup_user_interrupt(VEC_USER, 192);
121} 121}
122 122
123#define pcc2chip ((volatile u_char *)0xfff42000) 123#define pcc2chip ((volatile u_char *)0xfff42000)
diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
index 9f0e3d59bf92..2b888491f29a 100644
--- a/arch/m68k/q40/q40ints.c
+++ b/arch/m68k/q40/q40ints.c
@@ -15,10 +15,10 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/irq.h>
18 19
19#include <asm/ptrace.h> 20#include <asm/ptrace.h>
20#include <asm/system.h> 21#include <asm/system.h>
21#include <asm/irq.h>
22#include <asm/traps.h> 22#include <asm/traps.h>
23 23
24#include <asm/q40_master.h> 24#include <asm/q40_master.h>
@@ -35,35 +35,36 @@
35*/ 35*/
36 36
37static void q40_irq_handler(unsigned int, struct pt_regs *fp); 37static void q40_irq_handler(unsigned int, struct pt_regs *fp);
38static void q40_enable_irq(unsigned int); 38static void q40_irq_enable(struct irq_data *data);
39static void q40_disable_irq(unsigned int); 39static void q40_irq_disable(struct irq_data *data);
40 40
41unsigned short q40_ablecount[35]; 41unsigned short q40_ablecount[35];
42unsigned short q40_state[35]; 42unsigned short q40_state[35];
43 43
44static int q40_irq_startup(unsigned int irq) 44static unsigned int q40_irq_startup(struct irq_data *data)
45{ 45{
46 unsigned int irq = data->irq;
47
46 /* test for ISA ints not implemented by HW */ 48 /* test for ISA ints not implemented by HW */
47 switch (irq) { 49 switch (irq) {
48 case 1: case 2: case 8: case 9: 50 case 1: case 2: case 8: case 9:
49 case 11: case 12: case 13: 51 case 11: case 12: case 13:
50 printk("%s: ISA IRQ %d not implemented by HW\n", __func__, irq); 52 printk("%s: ISA IRQ %d not implemented by HW\n", __func__, irq);
51 return -ENXIO; 53 /* FIXME return -ENXIO; */
52 } 54 }
53 return 0; 55 return 0;
54} 56}
55 57
56static void q40_irq_shutdown(unsigned int irq) 58static void q40_irq_shutdown(struct irq_data *data)
57{ 59{
58} 60}
59 61
60static struct irq_controller q40_irq_controller = { 62static struct irq_chip q40_irq_chip = {
61 .name = "q40", 63 .name = "q40",
62 .lock = __SPIN_LOCK_UNLOCKED(q40_irq_controller.lock), 64 .irq_startup = q40_irq_startup,
63 .startup = q40_irq_startup, 65 .irq_shutdown = q40_irq_shutdown,
64 .shutdown = q40_irq_shutdown, 66 .irq_enable = q40_irq_enable,
65 .enable = q40_enable_irq, 67 .irq_disable = q40_irq_disable,
66 .disable = q40_disable_irq,
67}; 68};
68 69
69/* 70/*
@@ -81,13 +82,14 @@ static int disabled;
81 82
82void __init q40_init_IRQ(void) 83void __init q40_init_IRQ(void)
83{ 84{
84 m68k_setup_irq_controller(&q40_irq_controller, 1, Q40_IRQ_MAX); 85 m68k_setup_irq_controller(&q40_irq_chip, handle_simple_irq, 1,
86 Q40_IRQ_MAX);
85 87
86 /* setup handler for ISA ints */ 88 /* setup handler for ISA ints */
87 m68k_setup_auto_interrupt(q40_irq_handler); 89 m68k_setup_auto_interrupt(q40_irq_handler);
88 90
89 m68k_irq_startup(IRQ_AUTO_2); 91 m68k_irq_startup_irq(IRQ_AUTO_2);
90 m68k_irq_startup(IRQ_AUTO_4); 92 m68k_irq_startup_irq(IRQ_AUTO_4);
91 93
92 /* now enable some ints.. */ 94 /* now enable some ints.. */
93 master_outb(1, EXT_ENABLE_REG); /* ISA IRQ 5-15 */ 95 master_outb(1, EXT_ENABLE_REG); /* ISA IRQ 5-15 */
@@ -218,11 +220,11 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
218 switch (irq) { 220 switch (irq) {
219 case 4: 221 case 4:
220 case 6: 222 case 6:
221 __m68k_handle_int(Q40_IRQ_SAMPLE, fp); 223 do_IRQ(Q40_IRQ_SAMPLE, fp);
222 return; 224 return;
223 } 225 }
224 if (mir & Q40_IRQ_FRAME_MASK) { 226 if (mir & Q40_IRQ_FRAME_MASK) {
225 __m68k_handle_int(Q40_IRQ_FRAME, fp); 227 do_IRQ(Q40_IRQ_FRAME, fp);
226 master_outb(-1, FRAME_CLEAR_REG); 228 master_outb(-1, FRAME_CLEAR_REG);
227 } 229 }
228 if ((mir & Q40_IRQ_SER_MASK) || (mir & Q40_IRQ_EXT_MASK)) { 230 if ((mir & Q40_IRQ_SER_MASK) || (mir & Q40_IRQ_EXT_MASK)) {
@@ -257,7 +259,7 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
257 goto iirq; 259 goto iirq;
258 } 260 }
259 q40_state[irq] |= IRQ_INPROGRESS; 261 q40_state[irq] |= IRQ_INPROGRESS;
260 __m68k_handle_int(irq, fp); 262 do_IRQ(irq, fp);
261 q40_state[irq] &= ~IRQ_INPROGRESS; 263 q40_state[irq] &= ~IRQ_INPROGRESS;
262 264
263 /* naively enable everything, if that fails than */ 265 /* naively enable everything, if that fails than */
@@ -288,25 +290,29 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
288 mir = master_inb(IIRQ_REG); 290 mir = master_inb(IIRQ_REG);
289 /* should test whether keyboard irq is really enabled, doing it in defhand */ 291 /* should test whether keyboard irq is really enabled, doing it in defhand */
290 if (mir & Q40_IRQ_KEYB_MASK) 292 if (mir & Q40_IRQ_KEYB_MASK)
291 __m68k_handle_int(Q40_IRQ_KEYBOARD, fp); 293 do_IRQ(Q40_IRQ_KEYBOARD, fp);
292 294
293 return; 295 return;
294} 296}
295 297
296void q40_enable_irq(unsigned int irq) 298void q40_irq_enable(struct irq_data *data)
297{ 299{
300 unsigned int irq = data->irq;
301
298 if (irq >= 5 && irq <= 15) { 302 if (irq >= 5 && irq <= 15) {
299 mext_disabled--; 303 mext_disabled--;
300 if (mext_disabled > 0) 304 if (mext_disabled > 0)
301 printk("q40_enable_irq : nested disable/enable\n"); 305 printk("q40_irq_enable : nested disable/enable\n");
302 if (mext_disabled == 0) 306 if (mext_disabled == 0)
303 master_outb(1, EXT_ENABLE_REG); 307 master_outb(1, EXT_ENABLE_REG);
304 } 308 }
305} 309}
306 310
307 311
308void q40_disable_irq(unsigned int irq) 312void q40_irq_disable(struct irq_data *data)
309{ 313{
314 unsigned int irq = data->irq;
315
310 /* disable ISA iqs : only do something if the driver has been 316 /* disable ISA iqs : only do something if the driver has been
311 * verified to be Q40 "compatible" - right now IDE, NE2K 317 * verified to be Q40 "compatible" - right now IDE, NE2K
312 * Any driver should not attempt to sleep across disable_irq !! 318 * Any driver should not attempt to sleep across disable_irq !!
@@ -319,13 +325,3 @@ void q40_disable_irq(unsigned int irq)
319 printk("disable_irq nesting count %d\n",mext_disabled); 325 printk("disable_irq nesting count %d\n",mext_disabled);
320 } 326 }
321} 327}
322
323unsigned long q40_probe_irq_on(void)
324{
325 printk("irq probing not working - reconfigure the driver to avoid this\n");
326 return -1;
327}
328int q40_probe_irq_off(unsigned long irqs)
329{
330 return -1;
331}
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index 6464ad3ae3e6..78b60f53e90a 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -51,25 +51,29 @@ void sun3_disable_irq(unsigned int irq)
51 51
52static irqreturn_t sun3_int7(int irq, void *dev_id) 52static irqreturn_t sun3_int7(int irq, void *dev_id)
53{ 53{
54 *sun3_intreg |= (1 << irq); 54 unsigned int cnt;
55 if (!(kstat_cpu(0).irqs[irq] % 2000)) 55
56 sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 16000) / 2000]); 56 cnt = kstat_irqs_cpu(irq, 0);
57 if (!(cnt % 2000))
58 sun3_leds(led_pattern[cnt % 16000 / 2000]);
57 return IRQ_HANDLED; 59 return IRQ_HANDLED;
58} 60}
59 61
60static irqreturn_t sun3_int5(int irq, void *dev_id) 62static irqreturn_t sun3_int5(int irq, void *dev_id)
61{ 63{
64 unsigned int cnt;
65
62#ifdef CONFIG_SUN3 66#ifdef CONFIG_SUN3
63 intersil_clear(); 67 intersil_clear();
64#endif 68#endif
65 *sun3_intreg |= (1 << irq);
66#ifdef CONFIG_SUN3 69#ifdef CONFIG_SUN3
67 intersil_clear(); 70 intersil_clear();
68#endif 71#endif
69 xtime_update(1); 72 xtime_update(1);
70 update_process_times(user_mode(get_irq_regs())); 73 update_process_times(user_mode(get_irq_regs()));
71 if (!(kstat_cpu(0).irqs[irq] % 20)) 74 cnt = kstat_irqs_cpu(irq, 0);
72 sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]); 75 if (!(cnt % 20))
76 sun3_leds(led_pattern[cnt % 160 / 20]);
73 return IRQ_HANDLED; 77 return IRQ_HANDLED;
74} 78}
75 79
@@ -79,29 +83,33 @@ static irqreturn_t sun3_vec255(int irq, void *dev_id)
79 return IRQ_HANDLED; 83 return IRQ_HANDLED;
80} 84}
81 85
82static void sun3_inthandle(unsigned int irq, struct pt_regs *fp) 86static void sun3_irq_enable(struct irq_data *data)
83{ 87{
84 *sun3_intreg &= ~(1 << irq); 88 sun3_enable_irq(data->irq);
89};
85 90
86 __m68k_handle_int(irq, fp); 91static void sun3_irq_disable(struct irq_data *data)
87} 92{
93 sun3_disable_irq(data->irq);
94};
88 95
89static struct irq_controller sun3_irq_controller = { 96static struct irq_chip sun3_irq_chip = {
90 .name = "sun3", 97 .name = "sun3",
91 .lock = __SPIN_LOCK_UNLOCKED(sun3_irq_controller.lock), 98 .irq_startup = m68k_irq_startup,
92 .startup = m68k_irq_startup, 99 .irq_shutdown = m68k_irq_shutdown,
93 .shutdown = m68k_irq_shutdown, 100 .irq_enable = sun3_irq_enable,
94 .enable = sun3_enable_irq, 101 .irq_disable = sun3_irq_disable,
95 .disable = sun3_disable_irq, 102 .irq_mask = sun3_irq_disable,
103 .irq_unmask = sun3_irq_enable,
96}; 104};
97 105
98void __init sun3_init_IRQ(void) 106void __init sun3_init_IRQ(void)
99{ 107{
100 *sun3_intreg = 1; 108 *sun3_intreg = 1;
101 109
102 m68k_setup_auto_interrupt(sun3_inthandle); 110 m68k_setup_irq_controller(&sun3_irq_chip, handle_level_irq, IRQ_AUTO_1,
103 m68k_setup_irq_controller(&sun3_irq_controller, IRQ_AUTO_1, 7); 111 7);
104 m68k_setup_user_interrupt(VEC_USER, 128, NULL); 112 m68k_setup_user_interrupt(VEC_USER, 128);
105 113
106 if (request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL)) 114 if (request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL))
107 pr_err("Couldn't register %s interrupt\n", "int5"); 115 pr_err("Couldn't register %s interrupt\n", "int5");
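
The sun3 LED blinking above also illustrates a small API shift: interrupt counts are no longer read straight out of kstat_cpu(0).irqs[] but through kstat_irqs_cpu(), since the generic irq core now owns the counters. A tiny sketch with a placeholder action instead of sun3_leds():

    #include <linux/kernel_stat.h>
    #include <linux/kernel.h>

    /* act on every 2000th occurrence of an interrupt, counted on CPU 0 */
    static void demo_blink(int irq)
    {
            unsigned int cnt = kstat_irqs_cpu(irq, 0);

            if (!(cnt % 2000))
                    pr_debug("irq %d has fired %u times\n", irq, cnt);
    }
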
diff --git a/arch/microblaze/include/asm/namei.h b/arch/microblaze/include/asm/namei.h
deleted file mode 100644
index 61d60b8a07d5..000000000000
--- a/arch/microblaze/include/asm/namei.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright (C) 2006 Atmark Techno, Inc.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#ifndef _ASM_MICROBLAZE_NAMEI_H
10#define _ASM_MICROBLAZE_NAMEI_H
11
12#ifdef __KERNEL__
13
14/* This dummy routine maybe changed to something useful
15 * for /usr/gnemul/ emulation stuff.
16 * Look at asm-sparc/namei.h for details.
17 */
18#define __emul_prefix() NULL
19
20#endif /* __KERNEL__ */
21
22#endif /* _ASM_MICROBLAZE_NAMEI_H */
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 9b4cb00407d7..0be318609fc6 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -286,11 +286,11 @@ CLEAN_FILES += vmlinux.32 vmlinux.64
286archprepare: 286archprepare:
287ifdef CONFIG_MIPS32_N32 287ifdef CONFIG_MIPS32_N32
288 @echo ' Checking missing-syscalls for N32' 288 @echo ' Checking missing-syscalls for N32'
289 $(Q)$(MAKE) $(build)=. missing-syscalls ccflags-y="-mabi=n32" 289 $(Q)$(MAKE) $(build)=. missing-syscalls missing_syscalls_flags="-mabi=n32"
290endif 290endif
291ifdef CONFIG_MIPS32_O32 291ifdef CONFIG_MIPS32_O32
292 @echo ' Checking missing-syscalls for O32' 292 @echo ' Checking missing-syscalls for O32'
293 $(Q)$(MAKE) $(build)=. missing-syscalls ccflags-y="-mabi=32" 293 $(Q)$(MAKE) $(build)=. missing-syscalls missing_syscalls_flags="-mabi=32"
294endif 294endif
295 295
296install: 296install:
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
index 975c20327bb1..0a430e06f5e5 100644
--- a/arch/mips/cavium-octeon/flash_setup.c
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -17,8 +17,6 @@
17 17
18static struct map_info flash_map; 18static struct map_info flash_map;
19static struct mtd_info *mymtd; 19static struct mtd_info *mymtd;
20static int nr_parts;
21static struct mtd_partition *parts;
22static const char *part_probe_types[] = { 20static const char *part_probe_types[] = {
23 "cmdlinepart", 21 "cmdlinepart",
24#ifdef CONFIG_MTD_REDBOOT_PARTS 22#ifdef CONFIG_MTD_REDBOOT_PARTS
@@ -61,11 +59,8 @@ static int __init flash_init(void)
61 mymtd = do_map_probe("cfi_probe", &flash_map); 59 mymtd = do_map_probe("cfi_probe", &flash_map);
62 if (mymtd) { 60 if (mymtd) {
63 mymtd->owner = THIS_MODULE; 61 mymtd->owner = THIS_MODULE;
64 62 mtd_device_parse_register(mymtd, part_probe_types,
65 nr_parts = parse_mtd_partitions(mymtd, 63 0, NULL, 0);
66 part_probe_types,
67 &parts, 0);
68 mtd_device_register(mymtd, parts, nr_parts);
69 } else { 64 } else {
70 pr_err("Failed to register MTD device for flash\n"); 65 pr_err("Failed to register MTD device for flash\n");
71 } 66 }
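
The Octeon flash hunk folds the old two-step parse_mtd_partitions() + mtd_device_register() sequence into a single mtd_device_parse_register() call, so the driver drops its own parts/nr_parts bookkeeping. A sketch of the replacement call; the probe list mirrors the patch, the wrapper function is invented.

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/partitions.h>

    static const char *demo_probe_types[] = { "cmdlinepart", NULL };

    static int demo_register_flash(struct mtd_info *mtd)
    {
            /* NULL parser data, no fallback partition table */
            return mtd_device_parse_register(mtd, demo_probe_types, NULL, NULL, 0);
    }
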
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 8b606423bbd7..efcfff4d4627 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -207,8 +207,9 @@ void octeon_prepare_cpus(unsigned int max_cpus)
207 * the other bits alone. 207 * the other bits alone.
208 */ 208 */
209 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff); 209 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
210 if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, 210 if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
211 "SMP-IPI", mailbox_interrupt)) { 211 IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
212 mailbox_interrupt)) {
212 panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); 213 panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
213 } 214 }
214} 215}
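
The Octeon SMP change swaps the deprecated IRQF_DISABLED flag for IRQF_PERCPU | IRQF_NO_THREAD on the mailbox IPI, which also keeps the IPI from being force-threaded. A one-line sketch of the resulting request, with an invented wrapper name:

    #include <linux/interrupt.h>

    static int demo_request_ipi(unsigned int irq, irq_handler_t handler, void *arg)
    {
            return request_irq(irq, handler, IRQF_PERCPU | IRQF_NO_THREAD,
                               "SMP-IPI", arg);
    }
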
diff --git a/arch/mips/emma/common/prom.c b/arch/mips/emma/common/prom.c
index 708f08761406..cae42259d6da 100644
--- a/arch/mips/emma/common/prom.c
+++ b/arch/mips/emma/common/prom.c
@@ -50,7 +50,7 @@ void __init prom_init(void)
50 50
51 /* arg[0] is "g", the rest is boot parameters */ 51 /* arg[0] is "g", the rest is boot parameters */
52 for (i = 1; i < argc; i++) { 52 for (i = 1; i < argc; i++) {
53 if (strlen(arcs_cmdline) + strlen(arg[i] + 1) 53 if (strlen(arcs_cmdline) + strlen(arg[i]) + 1
54 >= sizeof(arcs_cmdline)) 54 >= sizeof(arcs_cmdline))
55 break; 55 break;
56 strcat(arcs_cmdline, arg[i]); 56 strcat(arcs_cmdline, arg[i]);
diff --git a/arch/mips/include/asm/mach-bcm47xx/gpio.h b/arch/mips/include/asm/mach-bcm47xx/gpio.h
index 76961cabeedf..2ef17e8df403 100644
--- a/arch/mips/include/asm/mach-bcm47xx/gpio.h
+++ b/arch/mips/include/asm/mach-bcm47xx/gpio.h
@@ -36,6 +36,8 @@ static inline int gpio_get_value(unsigned gpio)
36 return -EINVAL; 36 return -EINVAL;
37} 37}
38 38
39#define gpio_get_value_cansleep gpio_get_value
40
39static inline void gpio_set_value(unsigned gpio, int value) 41static inline void gpio_set_value(unsigned gpio, int value)
40{ 42{
41 switch (bcm47xx_bus_type) { 43 switch (bcm47xx_bus_type) {
@@ -54,6 +56,19 @@ static inline void gpio_set_value(unsigned gpio, int value)
54 } 56 }
55} 57}
56 58
59#define gpio_set_value_cansleep gpio_set_value
60
61static inline int gpio_cansleep(unsigned gpio)
62{
63 return 0;
64}
65
66static inline int gpio_is_valid(unsigned gpio)
67{
68 return gpio < (BCM47XX_EXTIF_GPIO_LINES + BCM47XX_CHIPCO_GPIO_LINES);
69}
70
71
57static inline int gpio_direction_input(unsigned gpio) 72static inline int gpio_direction_input(unsigned gpio)
58{ 73{
59 switch (bcm47xx_bus_type) { 74 switch (bcm47xx_bus_type) {
@@ -137,7 +152,4 @@ static inline int gpio_polarity(unsigned gpio, int value)
137} 152}
138 153
139 154
140/* cansleep wrappers */
141#include <asm-generic/gpio.h>
142
143#endif /* __BCM47XX_GPIO_H */ 155#endif /* __BCM47XX_GPIO_H */
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index ecea7871dec2..d8dad5340ea3 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -365,16 +365,18 @@
365#define __NR_syncfs (__NR_Linux + 342) 365#define __NR_syncfs (__NR_Linux + 342)
366#define __NR_sendmmsg (__NR_Linux + 343) 366#define __NR_sendmmsg (__NR_Linux + 343)
367#define __NR_setns (__NR_Linux + 344) 367#define __NR_setns (__NR_Linux + 344)
368#define __NR_process_vm_readv (__NR_Linux + 345)
369#define __NR_process_vm_writev (__NR_Linux + 346)
368 370
369/* 371/*
370 * Offset of the last Linux o32 flavoured syscall 372 * Offset of the last Linux o32 flavoured syscall
371 */ 373 */
372#define __NR_Linux_syscalls 344 374#define __NR_Linux_syscalls 346
373 375
374#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 376#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
375 377
376#define __NR_O32_Linux 4000 378#define __NR_O32_Linux 4000
377#define __NR_O32_Linux_syscalls 344 379#define __NR_O32_Linux_syscalls 346
378 380
379#if _MIPS_SIM == _MIPS_SIM_ABI64 381#if _MIPS_SIM == _MIPS_SIM_ABI64
380 382
@@ -686,16 +688,18 @@
686#define __NR_syncfs (__NR_Linux + 301) 688#define __NR_syncfs (__NR_Linux + 301)
687#define __NR_sendmmsg (__NR_Linux + 302) 689#define __NR_sendmmsg (__NR_Linux + 302)
688#define __NR_setns (__NR_Linux + 303) 690#define __NR_setns (__NR_Linux + 303)
691#define __NR_process_vm_readv (__NR_Linux + 304)
692#define __NR_process_vm_writev (__NR_Linux + 305)
689 693
690/* 694/*
691 * Offset of the last Linux 64-bit flavoured syscall 695 * Offset of the last Linux 64-bit flavoured syscall
692 */ 696 */
693#define __NR_Linux_syscalls 303 697#define __NR_Linux_syscalls 305
694 698
695#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 699#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
696 700
697#define __NR_64_Linux 5000 701#define __NR_64_Linux 5000
698#define __NR_64_Linux_syscalls 303 702#define __NR_64_Linux_syscalls 305
699 703
700#if _MIPS_SIM == _MIPS_SIM_NABI32 704#if _MIPS_SIM == _MIPS_SIM_NABI32
701 705
@@ -1012,16 +1016,18 @@
1012#define __NR_syncfs (__NR_Linux + 306) 1016#define __NR_syncfs (__NR_Linux + 306)
1013#define __NR_sendmmsg (__NR_Linux + 307) 1017#define __NR_sendmmsg (__NR_Linux + 307)
1014#define __NR_setns (__NR_Linux + 308) 1018#define __NR_setns (__NR_Linux + 308)
1019#define __NR_process_vm_readv (__NR_Linux + 309)
1020#define __NR_process_vm_writev (__NR_Linux + 310)
1015 1021
1016/* 1022/*
1017 * Offset of the last N32 flavoured syscall 1023 * Offset of the last N32 flavoured syscall
1018 */ 1024 */
1019#define __NR_Linux_syscalls 308 1025#define __NR_Linux_syscalls 310
1020 1026
1021#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1027#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1022 1028
1023#define __NR_N32_Linux 6000 1029#define __NR_N32_Linux 6000
1024#define __NR_N32_Linux_syscalls 308 1030#define __NR_N32_Linux_syscalls 310
1025 1031
1026#ifdef __KERNEL__ 1032#ifdef __KERNEL__
1027 1033
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 98c5a9737c14..e2d8e199be32 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -103,19 +103,10 @@ static int c0_compare_int_pending(void)
103 103
104/* 104/*
105 * Compare interrupt can be routed and latched outside the core, 105 * Compare interrupt can be routed and latched outside the core,
106 * so a single execution hazard barrier may not be enough to give 106 * so wait up to worst case number of cycle counter ticks for timer interrupt
107 * it time to clear as seen in the Cause register. 4 time the 107 * changes to propagate to the cause register.
108 * pipeline depth seems reasonably conservative, and empirically
109 * works better in configurations with high CPU/bus clock ratios.
110 */ 108 */
111 109#define COMPARE_INT_SEEN_TICKS 50
112#define compare_change_hazard() \
113 do { \
114 irq_disable_hazard(); \
115 irq_disable_hazard(); \
116 irq_disable_hazard(); \
117 irq_disable_hazard(); \
118 } while (0)
119 110
120int c0_compare_int_usable(void) 111int c0_compare_int_usable(void)
121{ 112{
@@ -126,8 +117,12 @@ int c0_compare_int_usable(void)
126 * IP7 already pending? Try to clear it by acking the timer. 117 * IP7 already pending? Try to clear it by acking the timer.
127 */ 118 */
128 if (c0_compare_int_pending()) { 119 if (c0_compare_int_pending()) {
129 write_c0_compare(read_c0_count()); 120 cnt = read_c0_count();
130 compare_change_hazard(); 121 write_c0_compare(cnt);
122 back_to_back_c0_hazard();
123 while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
124 if (!c0_compare_int_pending())
125 break;
131 if (c0_compare_int_pending()) 126 if (c0_compare_int_pending())
132 return 0; 127 return 0;
133 } 128 }
@@ -136,7 +131,7 @@ int c0_compare_int_usable(void)
136 cnt = read_c0_count(); 131 cnt = read_c0_count();
137 cnt += delta; 132 cnt += delta;
138 write_c0_compare(cnt); 133 write_c0_compare(cnt);
139 compare_change_hazard(); 134 back_to_back_c0_hazard();
140 if ((int)(read_c0_count() - cnt) < 0) 135 if ((int)(read_c0_count() - cnt) < 0)
141 break; 136 break;
142 /* increase delta if the timer was already expired */ 137 /* increase delta if the timer was already expired */
@@ -145,12 +140,17 @@ int c0_compare_int_usable(void)
145 while ((int)(read_c0_count() - cnt) <= 0) 140 while ((int)(read_c0_count() - cnt) <= 0)
146 ; /* Wait for expiry */ 141 ; /* Wait for expiry */
147 142
148 compare_change_hazard(); 143 while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
144 if (c0_compare_int_pending())
145 break;
149 if (!c0_compare_int_pending()) 146 if (!c0_compare_int_pending())
150 return 0; 147 return 0;
151 148 cnt = read_c0_count();
152 write_c0_compare(read_c0_count()); 149 write_c0_compare(cnt);
153 compare_change_hazard(); 150 back_to_back_c0_hazard();
151 while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
152 if (!c0_compare_int_pending())
153 break;
154 if (c0_compare_int_pending()) 154 if (c0_compare_int_pending())
155 return 0; 155 return 0;
156 156
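
The cevt-r4k change replaces the stack of irq_disable_hazard() barriers with a bounded wait: after writing Compare it issues back_to_back_c0_hazard() and then polls the cycle counter for up to COMPARE_INT_SEEN_TICKS ticks until Cause.IP7 reflects the change. A compact sketch of that wait, assuming the file's local c0_compare_int_pending() helper is visible; the function name is mine, not from the patch.

    #include <asm/mipsregs.h>

    /* spin for at most "ticks" cycle-counter ticks, or until the timer
     * interrupt pending bit matches the wanted state */
    static int demo_wait_pending(unsigned int ticks, int wanted)
    {
            unsigned int start = read_c0_count();

            while ((unsigned int)(read_c0_count() - start) < ticks)
                    if (!!c0_compare_int_pending() == !!wanted)
                            return 1;

            return !!c0_compare_int_pending() == !!wanted;
    }
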
diff --git a/arch/mips/kernel/cpufreq/loongson2_clock.c b/arch/mips/kernel/cpufreq/loongson2_clock.c
index cefc6e259baf..5426779d9fdb 100644
--- a/arch/mips/kernel/cpufreq/loongson2_clock.c
+++ b/arch/mips/kernel/cpufreq/loongson2_clock.c
@@ -7,6 +7,7 @@
7 * for more details. 7 * for more details.
8 */ 8 */
9 9
10#include <linux/module.h>
10#include <linux/cpufreq.h> 11#include <linux/cpufreq.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12 13
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 4f2971bcf8e5..315fc0b250f8 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -623,7 +623,7 @@ static int mipspmu_event_init(struct perf_event *event)
623 if (!atomic_inc_not_zero(&active_events)) { 623 if (!atomic_inc_not_zero(&active_events)) {
624 if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { 624 if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
625 atomic_dec(&active_events); 625 atomic_dec(&active_events);
626 return -ENOSPC; 626 return -EINVAL;
627 } 627 }
628 628
629 mutex_lock(&pmu_reserve_mutex); 629 mutex_lock(&pmu_reserve_mutex);
@@ -732,15 +732,15 @@ static int validate_group(struct perf_event *event)
732 memset(&fake_cpuc, 0, sizeof(fake_cpuc)); 732 memset(&fake_cpuc, 0, sizeof(fake_cpuc));
733 733
734 if (!validate_event(&fake_cpuc, leader)) 734 if (!validate_event(&fake_cpuc, leader))
735 return -ENOSPC; 735 return -EINVAL;
736 736
737 list_for_each_entry(sibling, &leader->sibling_list, group_entry) { 737 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
738 if (!validate_event(&fake_cpuc, sibling)) 738 if (!validate_event(&fake_cpuc, sibling))
739 return -ENOSPC; 739 return -EINVAL;
740 } 740 }
741 741
742 if (!validate_event(&fake_cpuc, event)) 742 if (!validate_event(&fake_cpuc, event))
743 return -ENOSPC; 743 return -EINVAL;
744 744
745 return 0; 745 return 0;
746} 746}
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 47920657968d..a632bc144efa 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -591,6 +591,8 @@ einval: li v0, -ENOSYS
591 sys sys_syncfs 1 591 sys sys_syncfs 1
592 sys sys_sendmmsg 4 592 sys sys_sendmmsg 4
593 sys sys_setns 2 593 sys sys_setns 2
594 sys sys_process_vm_readv 6 /* 4345 */
595 sys sys_process_vm_writev 6
594 .endm 596 .endm
595 597
596 /* We pre-compute the number of _instruction_ bytes needed to 598 /* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index fb7334bea731..3b5a5e9ae49c 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -430,4 +430,6 @@ sys_call_table:
430 PTR sys_syncfs 430 PTR sys_syncfs
431 PTR sys_sendmmsg 431 PTR sys_sendmmsg
432 PTR sys_setns 432 PTR sys_setns
433 PTR sys_process_vm_readv
434 PTR sys_process_vm_writev /* 5305 */
433 .size sys_call_table,.-sys_call_table 435 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 6de1f598346e..6be6f7020923 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -430,4 +430,6 @@ EXPORT(sysn32_call_table)
430 PTR sys_syncfs 430 PTR sys_syncfs
431 PTR compat_sys_sendmmsg 431 PTR compat_sys_sendmmsg
432 PTR sys_setns 432 PTR sys_setns
433 PTR compat_sys_process_vm_readv
434 PTR compat_sys_process_vm_writev /* 6310 */
433 .size sysn32_call_table,.-sysn32_call_table 435 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 1d813169e453..54228553691d 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -548,4 +548,6 @@ sys_call_table:
548 PTR sys_syncfs 548 PTR sys_syncfs
549 PTR compat_sys_sendmmsg 549 PTR compat_sys_sendmmsg
550 PTR sys_setns 550 PTR sys_setns
551 PTR compat_sys_process_vm_readv /* 4345 */
552 PTR compat_sys_process_vm_writev
551 .size sys_call_table,.-sys_call_table 553 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 261ccbc07740..5c8a49d55054 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1596,7 +1596,8 @@ void __cpuinit per_cpu_trap_init(void)
1596 } 1596 }
1597#endif /* CONFIG_MIPS_MT_SMTC */ 1597#endif /* CONFIG_MIPS_MT_SMTC */
1598 1598
1599 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 1599 if (!cpu_data[cpu].asid_cache)
1600 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1600 1601
1601 atomic_inc(&init_mm.mm_count); 1602 atomic_inc(&init_mm.mm_count);
1602 current->active_mm = &init_mm; 1603 current->active_mm = &init_mm;
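
The traps.c hunk above makes per_cpu_trap_init() seed asid_cache only while it is still zero, presumably so that re-running the init path (for instance when a CPU is brought back online) does not rewind the ASID generation counter. A generic sketch of the initialize-once idiom, with hypothetical names:

    struct example_cpu_state {
            unsigned long asid_cache;       /* 0 means "never initialized" */
    };

    static void example_cpu_init(struct example_cpu_state *st,
                                 unsigned long first_version)
    {
            /* Seed the generation counter only on first init; a later
             * re-init must not reset it, or ASIDs handed out under the
             * old generation could be reused while still live in TLBs. */
            if (!st->asid_cache)
                    st->asid_cache = first_version;
    }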
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index 7e9c0ffc11a5..77ed70fc2fe5 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -7,7 +7,7 @@
7 * Copyright (C) 2010 John Crispin <blogic@openwrt.org> 7 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
8 */ 8 */
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/types.h> 13#include <linux/types.h>
diff --git a/arch/mips/lantiq/devices.c b/arch/mips/lantiq/devices.c
index 44a36771c819..de1cb2bcd79a 100644
--- a/arch/mips/lantiq/devices.c
+++ b/arch/mips/lantiq/devices.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 56ba007bf1e5..e34fcfd0d5ca 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -6,7 +6,7 @@
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org> 6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/export.h>
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <asm/bootinfo.h> 11#include <asm/bootinfo.h>
12#include <asm/time.h> 12#include <asm/time.h>
diff --git a/arch/mips/lantiq/setup.c b/arch/mips/lantiq/setup.c
index 9b8af77ed0f9..1ff6c9d6cb93 100644
--- a/arch/mips/lantiq/setup.c
+++ b/arch/mips/lantiq/setup.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/io.h> 11#include <linux/io.h>
12#include <linux/ioport.h> 12#include <linux/ioport.h>
13#include <asm/bootinfo.h> 13#include <asm/bootinfo.h>
diff --git a/arch/mips/lantiq/xway/clk-ase.c b/arch/mips/lantiq/xway/clk-ase.c
index 22d823acd536..652258309c9c 100644
--- a/arch/mips/lantiq/xway/clk-ase.c
+++ b/arch/mips/lantiq/xway/clk-ase.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/clk.h> 12#include <linux/clk.h>
13 13
diff --git a/arch/mips/lantiq/xway/clk-xway.c b/arch/mips/lantiq/xway/clk-xway.c
index ddd39593c581..696b1a3e0642 100644
--- a/arch/mips/lantiq/xway/clk-xway.c
+++ b/arch/mips/lantiq/xway/clk-xway.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/clk.h> 12#include <linux/clk.h>
13 13
diff --git a/arch/mips/lantiq/xway/devices.c b/arch/mips/lantiq/xway/devices.c
index d0e32ab2ea07..d614aa7ff07f 100644
--- a/arch/mips/lantiq/xway/devices.c
+++ b/arch/mips/lantiq/xway/devices.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/mtd/physmap.h> 13#include <linux/mtd/physmap.h>
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 4278a459d6c4..cbb6ae5747b9 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -19,6 +19,7 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
22#include <linux/export.h>
22 23
23#include <lantiq_soc.h> 24#include <lantiq_soc.h>
24#include <xway_dma.h> 25#include <xway_dma.h>
diff --git a/arch/mips/lantiq/xway/gpio.c b/arch/mips/lantiq/xway/gpio.c
index a321451a5455..d2fa98f3c78d 100644
--- a/arch/mips/lantiq/xway/gpio.c
+++ b/arch/mips/lantiq/xway/gpio.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/gpio.h> 12#include <linux/gpio.h>
13#include <linux/ioport.h> 13#include <linux/ioport.h>
diff --git a/arch/mips/lantiq/xway/gpio_ebu.c b/arch/mips/lantiq/xway/gpio_ebu.c
index a479355abdb9..b91c7f17f10f 100644
--- a/arch/mips/lantiq/xway/gpio_ebu.c
+++ b/arch/mips/lantiq/xway/gpio_ebu.c
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/module.h> 10#include <linux/export.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/mutex.h> 13#include <linux/mutex.h>
diff --git a/arch/mips/lantiq/xway/gpio_stp.c b/arch/mips/lantiq/xway/gpio_stp.c
index 67d59d690340..ff9991cddeaa 100644
--- a/arch/mips/lantiq/xway/gpio_stp.c
+++ b/arch/mips/lantiq/xway/gpio_stp.c
@@ -9,7 +9,7 @@
9 9
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/module.h> 12#include <linux/export.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/mutex.h> 15#include <linux/mutex.h>
diff --git a/arch/mips/lantiq/xway/prom-ase.c b/arch/mips/lantiq/xway/prom-ase.c
index abe49f4db57f..ae4959ae865c 100644
--- a/arch/mips/lantiq/xway/prom-ase.c
+++ b/arch/mips/lantiq/xway/prom-ase.c
@@ -6,7 +6,7 @@
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org> 6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/export.h>
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <asm/bootinfo.h> 11#include <asm/bootinfo.h>
12#include <asm/time.h> 12#include <asm/time.h>
diff --git a/arch/mips/lantiq/xway/prom-xway.c b/arch/mips/lantiq/xway/prom-xway.c
index 1686692ac24d..2228133ca356 100644
--- a/arch/mips/lantiq/xway/prom-xway.c
+++ b/arch/mips/lantiq/xway/prom-xway.c
@@ -6,7 +6,7 @@
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org> 6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/export.h>
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <asm/bootinfo.h> 11#include <asm/bootinfo.h>
12#include <asm/time.h> 12#include <asm/time.h>
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
index a1be36d0e490..3d41f0bb5bf7 100644
--- a/arch/mips/lantiq/xway/reset.c
+++ b/arch/mips/lantiq/xway/reset.c
@@ -10,7 +10,7 @@
10#include <linux/io.h> 10#include <linux/io.h>
11#include <linux/ioport.h> 11#include <linux/ioport.h>
12#include <linux/pm.h> 12#include <linux/pm.h>
13#include <linux/module.h> 13#include <linux/export.h>
14#include <asm/reboot.h> 14#include <asm/reboot.h>
15 15
16#include <lantiq_soc.h> 16#include <lantiq_soc.h>
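
The lantiq hunks above (and the pci-alchemy/pci-lantiq ones further down) follow the module.h header split: files that only export symbols, or that relied on an implicit include, now pull in <linux/export.h> directly instead of the much heavier <linux/module.h>. A minimal sketch with a hypothetical helper:

    #include <linux/export.h>
    #include <linux/kernel.h>

    int example_soc_rev(void)                   /* hypothetical helper */
    {
            return 2;
    }
    EXPORT_SYMBOL_GPL(example_soc_rev);         /* provided by export.h */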
diff --git a/arch/mips/nxp/pnx8550/common/pci.c b/arch/mips/nxp/pnx8550/common/pci.c
deleted file mode 100644
index 98e86ddb86cc..000000000000
--- a/arch/mips/nxp/pnx8550/common/pci.c
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 *
3 * BRIEF MODULE DESCRIPTION
4 *
5 * Author: source@mvista.com
6 *
7 * This program is free software; you can distribute it and/or modify it
8 * under the terms of the GNU General Public License (Version 2) as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
19 */
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/kernel.h>
23#include <linux/init.h>
24
25#include <pci.h>
26#include <glb.h>
27#include <nand.h>
28
29static struct resource pci_io_resource = {
30	.start = PNX8550_PCIIO + 0x1000,	/* reserve legacy I/O space */

31 .end = PNX8550_PCIIO + PNX8550_PCIIO_SIZE,
32 .name = "pci IO space",
33 .flags = IORESOURCE_IO
34};
35
36static struct resource pci_mem_resource = {
37 .start = PNX8550_PCIMEM,
38 .end = PNX8550_PCIMEM + PNX8550_PCIMEM_SIZE - 1,
39 .name = "pci memory space",
40 .flags = IORESOURCE_MEM
41};
42
43extern struct pci_ops pnx8550_pci_ops;
44
45static struct pci_controller pnx8550_controller = {
46 .pci_ops = &pnx8550_pci_ops,
47 .io_map_base = PNX8550_PORT_BASE,
48 .io_resource = &pci_io_resource,
49 .mem_resource = &pci_mem_resource,
50};
51
52/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
53static inline unsigned long get_system_mem_size(void)
54{
55 /* Read IP2031_RANK0_ADDR_LO */
56 unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010);
57 /* Read IP2031_RANK1_ADDR_HI */
58 unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018);
59
60 return dram_r1_hi - dram_r0_lo + 1;
61}
62
63static int __init pnx8550_pci_setup(void)
64{
65 int pci_mem_code;
66 int mem_size = get_system_mem_size() >> 20;
67
68 /* Clear the Global 2 Register, PCI Inta Output Enable Registers
69 Bit 1:Enable DAC Powerdown
70 -> 0:DACs are enabled and are working normally
71 1:DACs are powerdown
72 Bit 0:Enable of PCI inta output
73 -> 0 = Disable PCI inta output
74 1 = Enable PCI inta output
75 */
76 PNX8550_GLB2_ENAB_INTA_O = 0;
77
78 /* Calc the PCI mem size code */
79 if (mem_size >= 128)
80 pci_mem_code = SIZE_128M;
81 else if (mem_size >= 64)
82 pci_mem_code = SIZE_64M;
83 else if (mem_size >= 32)
84 pci_mem_code = SIZE_32M;
85 else
86 pci_mem_code = SIZE_16M;
87
88 /* Set PCI_XIO registers */
89 outl(pci_mem_resource.start, PCI_BASE | PCI_BASE1_LO);
90 outl(pci_mem_resource.end + 1, PCI_BASE | PCI_BASE1_HI);
91 outl(pci_io_resource.start, PCI_BASE | PCI_BASE2_LO);
92 outl(pci_io_resource.end, PCI_BASE | PCI_BASE2_HI);
93
94 /* Send memory transaction via PCI_BASE2 */
95 outl(0x00000001, PCI_BASE | PCI_IO);
96
97 /* Unlock the setup register */
98 outl(0xca, PCI_BASE | PCI_UNLOCKREG);
99
100 /*
101 * BAR0 of PNX8550 (pci base 10) must be zero in order for ide
102 * to work, and in order for bus_to_baddr to work without any
103 * hacks.
104 */
105 outl(0x00000000, PCI_BASE | PCI_BASE10);
106
107 /*
108 *These two bars are set by default or the boot code.
109 * However, it's safer to set them here so we're not boot
110 * code dependent.
111 */
112 outl(0x1be00000, PCI_BASE | PCI_BASE14); /* PNX MMIO */
113 outl(PNX8550_NAND_BASE_ADDR, PCI_BASE | PCI_BASE18); /* XIO */
114
115 outl(PCI_EN_TA |
116 PCI_EN_PCI2MMI |
117 PCI_EN_XIO |
118 PCI_SETUP_BASE18_SIZE(SIZE_32M) |
119 PCI_SETUP_BASE18_EN |
120 PCI_SETUP_BASE14_EN |
121 PCI_SETUP_BASE10_PREF |
122 PCI_SETUP_BASE10_SIZE(pci_mem_code) |
123 PCI_SETUP_CFGMANAGE_EN |
124 PCI_SETUP_PCIARB_EN,
125 PCI_BASE |
126 PCI_SETUP); /* PCI_SETUP */
127 outl(0x00000000, PCI_BASE | PCI_CTRL); /* PCI_CONTROL */
128
129 register_pci_controller(&pnx8550_controller);
130
131 return 0;
132}
133
134arch_initcall(pnx8550_pci_setup);
diff --git a/arch/mips/nxp/pnx8550/common/setup.c b/arch/mips/nxp/pnx8550/common/setup.c
deleted file mode 100644
index 71adac323323..000000000000
--- a/arch/mips/nxp/pnx8550/common/setup.c
+++ /dev/null
@@ -1,143 +0,0 @@
1/*
2 *
3 * 2.6 port, Embedded Alley Solutions, Inc
4 *
5 * Based on Per Hallsmark, per.hallsmark@mvista.com
6 *
7 * This program is free software; you can distribute it and/or modify it
8 * under the terms of the GNU General Public License (Version 2) as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
19 */
20#include <linux/init.h>
21#include <linux/sched.h>
22#include <linux/ioport.h>
23#include <linux/irq.h>
24#include <linux/mm.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/serial_pnx8xxx.h>
28#include <linux/pm.h>
29
30#include <asm/cpu.h>
31#include <asm/bootinfo.h>
32#include <asm/irq.h>
33#include <asm/mipsregs.h>
34#include <asm/reboot.h>
35#include <asm/pgtable.h>
36#include <asm/time.h>
37
38#include <glb.h>
39#include <int.h>
40#include <pci.h>
41#include <uart.h>
42#include <nand.h>
43
44extern void __init board_setup(void);
45extern void pnx8550_machine_restart(char *);
46extern void pnx8550_machine_halt(void);
47extern void pnx8550_machine_power_off(void);
48extern struct resource ioport_resource;
49extern struct resource iomem_resource;
50extern char *prom_getcmdline(void);
51
52struct resource standard_io_resources[] = {
53 {
54 .start = 0x00,
55 .end = 0x1f,
56 .name = "dma1",
57 .flags = IORESOURCE_BUSY
58 }, {
59 .start = 0x40,
60 .end = 0x5f,
61 .name = "timer",
62 .flags = IORESOURCE_BUSY
63 }, {
64 .start = 0x80,
65 .end = 0x8f,
66 .name = "dma page reg",
67 .flags = IORESOURCE_BUSY
68 }, {
69 .start = 0xc0,
70 .end = 0xdf,
71 .name = "dma2",
72 .flags = IORESOURCE_BUSY
73 },
74};
75
76#define STANDARD_IO_RESOURCES ARRAY_SIZE(standard_io_resources)
77
78extern struct resource pci_io_resource;
79extern struct resource pci_mem_resource;
80
81/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
82unsigned long get_system_mem_size(void)
83{
84 /* Read IP2031_RANK0_ADDR_LO */
85 unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010);
86 /* Read IP2031_RANK1_ADDR_HI */
87 unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018);
88
89 return dram_r1_hi - dram_r0_lo + 1;
90}
91
92int pnx8550_console_port = -1;
93
94void __init plat_mem_setup(void)
95{
96 int i;
97 char* argptr;
98
99 board_setup(); /* board specific setup */
100
101 _machine_restart = pnx8550_machine_restart;
102 _machine_halt = pnx8550_machine_halt;
103 pm_power_off = pnx8550_machine_power_off;
104
105 /* Clear the Global 2 Register, PCI Inta Output Enable Registers
106 Bit 1:Enable DAC Powerdown
107 -> 0:DACs are enabled and are working normally
108 1:DACs are powerdown
109 Bit 0:Enable of PCI inta output
110 -> 0 = Disable PCI inta output
111 1 = Enable PCI inta output
112 */
113 PNX8550_GLB2_ENAB_INTA_O = 0;
114
115 /* IO/MEM resources. */
116 set_io_port_base(PNX8550_PORT_BASE);
117 ioport_resource.start = 0;
118 ioport_resource.end = ~0;
119 iomem_resource.start = 0;
120 iomem_resource.end = ~0;
121
122 /* Request I/O space for devices on this board */
123 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
124 request_resource(&ioport_resource, standard_io_resources + i);
125
126 /* Place the Mode Control bit for GPIO pin 16 in primary function */
127 /* Pin 16 is used by UART1, UA1_TX */
128 outl((PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_16_BIT) |
129 (PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_17_BIT),
130 PNX8550_GPIO_MC1);
131
132 argptr = prom_getcmdline();
133 if ((argptr = strstr(argptr, "console=ttyS")) != NULL) {
134 argptr += strlen("console=ttyS");
135 pnx8550_console_port = *argptr == '0' ? 0 : 1;
136
137 /* We must initialize the UART (console) before early printk */
138 /* Set LCR to 8-bit and BAUD to 38400 (no 5) */
139 ip3106_lcr(UART_BASE, pnx8550_console_port) =
140 PNX8XXX_UART_LCR_8BIT;
141 ip3106_baud(UART_BASE, pnx8550_console_port) = 5;
142 }
143}
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index 4ee57104e47b..b5ce041cdafb 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -7,6 +7,7 @@
7 * Support for all devices (greater than 16) added by David Gathright. 7 * Support for all devices (greater than 16) added by David Gathright.
8 */ 8 */
9 9
10#include <linux/export.h>
10#include <linux/types.h> 11#include <linux/types.h>
11#include <linux/pci.h> 12#include <linux/pci.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 8656388b34bd..be1e1afe12c3 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -13,6 +13,7 @@
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/vmalloc.h> 15#include <linux/vmalloc.h>
16#include <linux/export.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
17 18
18#include <asm/pci.h> 19#include <asm/pci.h>
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
index cf4c868715ac..dcc926e06fce 100644
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ b/arch/mips/pmc-sierra/yosemite/prom.c
@@ -102,7 +102,7 @@ void __init prom_init(void)
102 102
103 /* Get the boot parameters */ 103 /* Get the boot parameters */
104 for (i = 1; i < argc; i++) { 104 for (i = 1; i < argc; i++) {
105 if (strlen(arcs_cmdline) + strlen(arg[i] + 1) >= 105 if (strlen(arcs_cmdline) + strlen(arg[i]) + 1 >=
106 sizeof(arcs_cmdline)) 106 sizeof(arcs_cmdline))
107 break; 107 break;
108 108
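
The one-line change above fixes the command-line length check: the old code computed strlen(arg[i] + 1), the length of the argument minus its first character, where it meant strlen(arg[i]) + 1, the argument length plus one byte for the separator. A standalone illustration of the difference:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *arg = "console=ttyS0";          /* 13 characters */

            printf("%zu\n", strlen(arg + 1));  /* 12: skips the first byte  */
            printf("%zu\n", strlen(arg) + 1);  /* 14: full length + 1 extra */
            return 0;
    }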
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b177caa56d95..951e18f5335b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -345,7 +345,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
345 345
346config KEXEC 346config KEXEC
347 bool "kexec system call (EXPERIMENTAL)" 347 bool "kexec system call (EXPERIMENTAL)"
348 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !47x)) && EXPERIMENTAL 348 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !PPC_47x)) && EXPERIMENTAL
349 help 349 help
350 kexec is a system call that implements the ability to shutdown your 350 kexec is a system call that implements the ability to shutdown your
351 current kernel, and to start another kernel. It is like a reboot 351 current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 57af16edc192..70ba0c0a1223 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -255,12 +255,6 @@ checkbin:
255 echo 'disable kernel modules' ; \ 255 echo 'disable kernel modules' ; \
256 false ; \ 256 false ; \
257 fi 257 fi
258 @if ! /bin/echo dssall | $(AS) -many -o $(TOUT) >/dev/null 2>&1 ; then \
259 echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build ' ; \
260 echo 'correctly with old versions of binutils.' ; \
261 echo '*** Please upgrade your binutils to 2.12.1 or newer' ; \
262 false ; \
263 fi
264 258
265CLEAN_FILES += $(TOUT) 259CLEAN_FILES += $(TOUT)
266 260
diff --git a/arch/powerpc/boot/dts/charon.dts b/arch/powerpc/boot/dts/charon.dts
new file mode 100644
index 000000000000..0e00e508eaa6
--- /dev/null
+++ b/arch/powerpc/boot/dts/charon.dts
@@ -0,0 +1,236 @@
1/*
2 * charon board Device Tree Source
3 *
4 * Copyright (C) 2007 Semihalf
5 * Marian Balakowicz <m8@semihalf.com>
6 *
7 * Copyright (C) 2010 DENX Software Engineering GmbH
8 * Heiko Schocher <hs@denx.de>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16/dts-v1/;
17
18/ {
19 model = "anon,charon";
20 compatible = "anon,charon";
21 #address-cells = <1>;
22 #size-cells = <1>;
23 interrupt-parent = <&mpc5200_pic>;
24
25 cpus {
26 #address-cells = <1>;
27 #size-cells = <0>;
28
29 PowerPC,5200@0 {
30 device_type = "cpu";
31 reg = <0>;
32 d-cache-line-size = <32>;
33 i-cache-line-size = <32>;
34 d-cache-size = <0x4000>; // L1, 16K
35 i-cache-size = <0x4000>; // L1, 16K
36 timebase-frequency = <0>; // from bootloader
37 bus-frequency = <0>; // from bootloader
38 clock-frequency = <0>; // from bootloader
39 };
40 };
41
42 memory {
43 device_type = "memory";
44 reg = <0x00000000 0x08000000>; // 128MB
45 };
46
47 soc5200@f0000000 {
48 #address-cells = <1>;
49 #size-cells = <1>;
50 compatible = "fsl,mpc5200-immr";
51 ranges = <0 0xf0000000 0x0000c000>;
52 reg = <0xf0000000 0x00000100>;
53 bus-frequency = <0>; // from bootloader
54 system-frequency = <0>; // from bootloader
55
56 cdm@200 {
57 compatible = "fsl,mpc5200-cdm";
58 reg = <0x200 0x38>;
59 };
60
61 mpc5200_pic: interrupt-controller@500 {
62 // 5200 interrupts are encoded into two levels;
63 interrupt-controller;
64 #interrupt-cells = <3>;
65 compatible = "fsl,mpc5200-pic";
66 reg = <0x500 0x80>;
67 };
68
69 timer@600 { // General Purpose Timer
70 compatible = "fsl,mpc5200-gpt";
71 reg = <0x600 0x10>;
72 interrupts = <1 9 0>;
73 fsl,has-wdt;
74 };
75
76 can@900 {
77 compatible = "fsl,mpc5200-mscan";
78 interrupts = <2 17 0>;
79 reg = <0x900 0x80>;
80 };
81
82 can@980 {
83 compatible = "fsl,mpc5200-mscan";
84 interrupts = <2 18 0>;
85 reg = <0x980 0x80>;
86 };
87
88 gpio_simple: gpio@b00 {
89 compatible = "fsl,mpc5200-gpio";
90 reg = <0xb00 0x40>;
91 interrupts = <1 7 0>;
92 gpio-controller;
93 #gpio-cells = <2>;
94 };
95
96 usb@1000 {
97 compatible = "fsl,mpc5200-ohci","ohci-be";
98 reg = <0x1000 0xff>;
99 interrupts = <2 6 0>;
100 };
101
102 dma-controller@1200 {
103 device_type = "dma-controller";
104 compatible = "fsl,mpc5200-bestcomm";
105 reg = <0x1200 0x80>;
106 interrupts = <3 0 0 3 1 0 3 2 0 3 3 0
107 3 4 0 3 5 0 3 6 0 3 7 0
108 3 8 0 3 9 0 3 10 0 3 11 0
109 3 12 0 3 13 0 3 14 0 3 15 0>;
110 };
111
112 xlb@1f00 {
113 compatible = "fsl,mpc5200-xlb";
114 reg = <0x1f00 0x100>;
115 };
116
117 serial@2000 { // PSC1
118 compatible = "fsl,mpc5200-psc-uart";
119 reg = <0x2000 0x100>;
120 interrupts = <2 1 0>;
121 };
122
123 serial@2400 { // PSC3
124 compatible = "fsl,mpc5200-psc-uart";
125 reg = <0x2400 0x100>;
126 interrupts = <2 3 0>;
127 };
128
129 ethernet@3000 {
130 compatible = "fsl,mpc5200-fec";
131 reg = <0x3000 0x400>;
132 local-mac-address = [ 00 00 00 00 00 00 ];
133 interrupts = <2 5 0>;
134 fixed-link = <1 1 100 0 0>;
135 };
136
137 mdio@3000 {
138 #address-cells = <1>;
139 #size-cells = <0>;
140 compatible = "fsl,mpc5200-mdio";
141 reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts
142 interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co.
143 };
144
145 ata@3a00 {
146 compatible = "fsl,mpc5200-ata";
147 reg = <0x3a00 0x100>;
148 interrupts = <2 7 0>;
149 };
150
151 i2c@3d00 {
152 #address-cells = <1>;
153 #size-cells = <0>;
154 compatible = "fsl,mpc5200-i2c","fsl-i2c";
155 reg = <0x3d00 0x40>;
156 interrupts = <2 15 0>;
157 };
158
159
160 i2c@3d40 {
161 #address-cells = <1>;
162 #size-cells = <0>;
163 compatible = "fsl,mpc5200-i2c","fsl-i2c";
164 reg = <0x3d40 0x40>;
165 interrupts = <2 16 0>;
166
167 dtt@28 {
168 compatible = "national,lm80";
169 reg = <0x28>;
170 };
171
172 rtc@68 {
173 compatible = "dallas,ds1374";
174 reg = <0x68>;
175 };
176 };
177
178 sram@8000 {
179 compatible = "fsl,mpc5200-sram";
180 reg = <0x8000 0x4000>;
181 };
182 };
183
184 localbus {
185 compatible = "fsl,mpc5200-lpb","simple-bus";
186 #address-cells = <2>;
187 #size-cells = <1>;
188 ranges = < 0 0 0xfc000000 0x02000000
189 1 0 0xe0000000 0x04000000 // CS1 range, SM501
190 3 0 0xe8000000 0x00080000>;
191
192 flash@0,0 {
193 compatible = "cfi-flash";
194 reg = <0 0 0x02000000>;
195 bank-width = <4>;
196 device-width = <2>;
197 #size-cells = <1>;
198 #address-cells = <1>;
199 };
200
201 display@1,0 {
202 compatible = "smi,sm501";
203 reg = <1 0x00000000 0x00800000
204 1 0x03e00000 0x00200000>;
205 mode = "640x480-32@60";
206 interrupts = <1 1 3>;
207 little-endian;
208 };
209
210 mram0@3,0 {
211 compatible = "mtd-ram";
212 reg = <3 0x00000 0x80000>;
213 bank-width = <1>;
214 };
215 };
216
217 pci@f0000d00 {
218 #interrupt-cells = <1>;
219 #size-cells = <2>;
220 #address-cells = <3>;
221 device_type = "pci";
222 compatible = "fsl,mpc5200-pci";
223 reg = <0xf0000d00 0x100>;
224 interrupt-map-mask = <0xf800 0 0 7>;
225 interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3
226 0xc000 0 0 2 &mpc5200_pic 0 0 3
227 0xc000 0 0 3 &mpc5200_pic 0 0 3
228 0xc000 0 0 4 &mpc5200_pic 0 0 3>;
229 clock-frequency = <0>; // From boot loader
230 interrupts = <2 8 0 2 9 0 2 10 0>;
231 bus-range = <0 0>;
232 ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
233 0x02000000 0 0x90000000 0x90000000 0 0x10000000
234 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
235 };
236};
diff --git a/arch/powerpc/boot/dts/p1023rds.dts b/arch/powerpc/boot/dts/p1023rds.dts
index d9b776740a67..d3b478242ea9 100644
--- a/arch/powerpc/boot/dts/p1023rds.dts
+++ b/arch/powerpc/boot/dts/p1023rds.dts
@@ -449,6 +449,7 @@
449 interrupt-parent = <&mpic>; 449 interrupt-parent = <&mpic>;
450 interrupts = <16 2>; 450 interrupts = <16 2>;
451 interrupt-map-mask = <0xf800 0 0 7>; 451 interrupt-map-mask = <0xf800 0 0 7>;
452 /* IRQ[0:3] are pulled up on board, set to active-low */
452 interrupt-map = < 453 interrupt-map = <
453 /* IDSEL 0x0 */ 454 /* IDSEL 0x0 */
454 0000 0 0 1 &mpic 0 1 455 0000 0 0 1 &mpic 0 1
@@ -488,11 +489,15 @@
488 interrupt-parent = <&mpic>; 489 interrupt-parent = <&mpic>;
489 interrupts = <16 2>; 490 interrupts = <16 2>;
490 interrupt-map-mask = <0xf800 0 0 7>; 491 interrupt-map-mask = <0xf800 0 0 7>;
492 /*
493 * IRQ[4:6] only for PCIe, set to active-high,
494 * IRQ[7] is pulled up on board, set to active-low
495 */
491 interrupt-map = < 496 interrupt-map = <
492 /* IDSEL 0x0 */ 497 /* IDSEL 0x0 */
493 0000 0 0 1 &mpic 4 1 498 0000 0 0 1 &mpic 4 2
494 0000 0 0 2 &mpic 5 1 499 0000 0 0 2 &mpic 5 2
495 0000 0 0 3 &mpic 6 1 500 0000 0 0 3 &mpic 6 2
496 0000 0 0 4 &mpic 7 1 501 0000 0 0 4 &mpic 7 1
497 >; 502 >;
498 ranges = <0x2000000 0x0 0xa0000000 503 ranges = <0x2000000 0x0 0xa0000000
@@ -527,12 +532,16 @@
527 interrupt-parent = <&mpic>; 532 interrupt-parent = <&mpic>;
528 interrupts = <16 2>; 533 interrupts = <16 2>;
529 interrupt-map-mask = <0xf800 0 0 7>; 534 interrupt-map-mask = <0xf800 0 0 7>;
535 /*
536 * IRQ[8:10] are pulled up on board, set to active-low
537 * IRQ[11] only for PCIe, set to active-high,
538 */
530 interrupt-map = < 539 interrupt-map = <
531 /* IDSEL 0x0 */ 540 /* IDSEL 0x0 */
532 0000 0 0 1 &mpic 8 1 541 0000 0 0 1 &mpic 8 1
533 0000 0 0 2 &mpic 9 1 542 0000 0 0 2 &mpic 9 1
534 0000 0 0 3 &mpic 10 1 543 0000 0 0 3 &mpic 10 1
535 0000 0 0 4 &mpic 11 1 544 0000 0 0 4 &mpic 11 2
536 >; 545 >;
537 ranges = <0x2000000 0x0 0x80000000 546 ranges = <0x2000000 0x0 0x80000000
538 0x2000000 0x0 0x80000000 547 0x2000000 0x0 0x80000000
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index 959cd2cfc275..716a37be16e3 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -1,9 +1,10 @@
1CONFIG_EXPERIMENTAL=y 1CONFIG_EXPERIMENTAL=y
2CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
3CONFIG_SPARSE_IRQ=y
3CONFIG_LOG_BUF_SHIFT=14 4CONFIG_LOG_BUF_SHIFT=14
4CONFIG_BLK_DEV_INITRD=y 5CONFIG_BLK_DEV_INITRD=y
5# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 6# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
6CONFIG_EXPERT=y 7CONFIG_EMBEDDED=y
7# CONFIG_SYSCTL_SYSCALL is not set 8# CONFIG_SYSCTL_SYSCALL is not set
8# CONFIG_KALLSYMS is not set 9# CONFIG_KALLSYMS is not set
9# CONFIG_EPOLL is not set 10# CONFIG_EPOLL is not set
@@ -17,7 +18,6 @@ CONFIG_PPC_MPC5200_SIMPLE=y
17CONFIG_PPC_MPC5200_BUGFIX=y 18CONFIG_PPC_MPC5200_BUGFIX=y
18# CONFIG_PPC_PMAC is not set 19# CONFIG_PPC_PMAC is not set
19CONFIG_PPC_BESTCOMM=y 20CONFIG_PPC_BESTCOMM=y
20CONFIG_SPARSE_IRQ=y
21CONFIG_PM=y 21CONFIG_PM=y
22# CONFIG_PCI is not set 22# CONFIG_PCI is not set
23CONFIG_NET=y 23CONFIG_NET=y
@@ -38,17 +38,18 @@ CONFIG_MTD=y
38CONFIG_MTD_CONCAT=y 38CONFIG_MTD_CONCAT=y
39CONFIG_MTD_PARTITIONS=y 39CONFIG_MTD_PARTITIONS=y
40CONFIG_MTD_CMDLINE_PARTS=y 40CONFIG_MTD_CMDLINE_PARTS=y
41CONFIG_MTD_OF_PARTS=y
41CONFIG_MTD_CHAR=y 42CONFIG_MTD_CHAR=y
42CONFIG_MTD_BLOCK=y 43CONFIG_MTD_BLOCK=y
43CONFIG_MTD_CFI=y 44CONFIG_MTD_CFI=y
44CONFIG_MTD_CFI_AMDSTD=y 45CONFIG_MTD_CFI_AMDSTD=y
45CONFIG_MTD_ROM=y 46CONFIG_MTD_ROM=y
46CONFIG_MTD_PHYSMAP_OF=y 47CONFIG_MTD_PHYSMAP_OF=y
48CONFIG_MTD_PLATRAM=y
47CONFIG_PROC_DEVICETREE=y 49CONFIG_PROC_DEVICETREE=y
48CONFIG_BLK_DEV_LOOP=y 50CONFIG_BLK_DEV_LOOP=y
49CONFIG_BLK_DEV_RAM=y 51CONFIG_BLK_DEV_RAM=y
50CONFIG_BLK_DEV_RAM_SIZE=32768 52CONFIG_BLK_DEV_RAM_SIZE=32768
51# CONFIG_MISC_DEVICES is not set
52CONFIG_BLK_DEV_SD=y 53CONFIG_BLK_DEV_SD=y
53CONFIG_CHR_DEV_SG=y 54CONFIG_CHR_DEV_SG=y
54CONFIG_ATA=y 55CONFIG_ATA=y
@@ -56,13 +57,11 @@ CONFIG_PATA_MPC52xx=y
56CONFIG_PATA_PLATFORM=y 57CONFIG_PATA_PLATFORM=y
57CONFIG_NETDEVICES=y 58CONFIG_NETDEVICES=y
58CONFIG_LXT_PHY=y 59CONFIG_LXT_PHY=y
60CONFIG_FIXED_PHY=y
59CONFIG_NET_ETHERNET=y 61CONFIG_NET_ETHERNET=y
60CONFIG_FEC_MPC52xx=y 62CONFIG_FEC_MPC52xx=y
61# CONFIG_NETDEV_1000 is not set 63# CONFIG_NETDEV_1000 is not set
62# CONFIG_NETDEV_10000 is not set 64# CONFIG_NETDEV_10000 is not set
63# CONFIG_INPUT is not set
64# CONFIG_SERIO is not set
65# CONFIG_VT is not set
66CONFIG_SERIAL_MPC52xx=y 65CONFIG_SERIAL_MPC52xx=y
67CONFIG_SERIAL_MPC52xx_CONSOLE=y 66CONFIG_SERIAL_MPC52xx_CONSOLE=y
68CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 67CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
@@ -70,7 +69,13 @@ CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
70CONFIG_I2C=y 69CONFIG_I2C=y
71CONFIG_I2C_CHARDEV=y 70CONFIG_I2C_CHARDEV=y
72CONFIG_I2C_MPC=y 71CONFIG_I2C_MPC=y
72CONFIG_SENSORS_LM80=y
73CONFIG_WATCHDOG=y 73CONFIG_WATCHDOG=y
74CONFIG_MFD_SM501=y
75CONFIG_FB=y
76CONFIG_FB_FOREIGN_ENDIAN=y
77CONFIG_FB_SM501=y
78CONFIG_FRAMEBUFFER_CONSOLE=y
74CONFIG_USB=y 79CONFIG_USB=y
75CONFIG_USB_DEVICEFS=y 80CONFIG_USB_DEVICEFS=y
76# CONFIG_USB_DEVICE_CLASS is not set 81# CONFIG_USB_DEVICE_CLASS is not set
@@ -80,10 +85,10 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
80CONFIG_USB_STORAGE=y 85CONFIG_USB_STORAGE=y
81CONFIG_RTC_CLASS=y 86CONFIG_RTC_CLASS=y
82CONFIG_RTC_DRV_DS1307=y 87CONFIG_RTC_DRV_DS1307=y
88CONFIG_RTC_DRV_DS1374=y
83CONFIG_EXT2_FS=y 89CONFIG_EXT2_FS=y
84CONFIG_EXT3_FS=y 90CONFIG_EXT3_FS=y
85# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 91# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
86CONFIG_INOTIFY=y
87CONFIG_MSDOS_FS=y 92CONFIG_MSDOS_FS=y
88CONFIG_VFAT_FS=y 93CONFIG_VFAT_FS=y
89CONFIG_PROC_KCORE=y 94CONFIG_PROC_KCORE=y
@@ -102,7 +107,6 @@ CONFIG_DEBUG_KERNEL=y
102CONFIG_DETECT_HUNG_TASK=y 107CONFIG_DETECT_HUNG_TASK=y
103# CONFIG_DEBUG_BUGVERBOSE is not set 108# CONFIG_DEBUG_BUGVERBOSE is not set
104CONFIG_DEBUG_INFO=y 109CONFIG_DEBUG_INFO=y
105# CONFIG_RCU_CPU_STALL_DETECTOR is not set
106CONFIG_CRYPTO_ECB=y 110CONFIG_CRYPTO_ECB=y
107CONFIG_CRYPTO_PCBC=y 111CONFIG_CRYPTO_PCBC=y
108# CONFIG_CRYPTO_ANSI_CPRNG is not set 112# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 6cdf1c0d2c8a..3b98d7354341 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -52,6 +52,8 @@ CONFIG_MTD_CFI=y
52CONFIG_MTD_JEDECPROBE=y 52CONFIG_MTD_JEDECPROBE=y
53CONFIG_MTD_CFI_AMDSTD=y 53CONFIG_MTD_CFI_AMDSTD=y
54CONFIG_MTD_PHYSMAP_OF=y 54CONFIG_MTD_PHYSMAP_OF=y
55CONFIG_MTD_NAND=m
56CONFIG_MTD_NAND_NDFC=m
55CONFIG_MTD_UBI=m 57CONFIG_MTD_UBI=m
56CONFIG_MTD_UBI_GLUEBI=m 58CONFIG_MTD_UBI_GLUEBI=m
57CONFIG_PROC_DEVICETREE=y 59CONFIG_PROC_DEVICETREE=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 84a685a505fe..535711fcb13c 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -485,3 +485,7 @@ CONFIG_CRYPTO_TWOFISH=m
485CONFIG_CRYPTO_LZO=m 485CONFIG_CRYPTO_LZO=m
486# CONFIG_CRYPTO_ANSI_CPRNG is not set 486# CONFIG_CRYPTO_ANSI_CPRNG is not set
487# CONFIG_CRYPTO_HW is not set 487# CONFIG_CRYPTO_HW is not set
488CONFIG_VIRTUALIZATION=y
489CONFIG_KVM_BOOK3S_64=m
490CONFIG_KVM_BOOK3S_64_HV=y
491CONFIG_VHOST_NET=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 96a58b709705..a72f2415a647 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -362,3 +362,7 @@ CONFIG_CRYPTO_TWOFISH=m
362CONFIG_CRYPTO_LZO=m 362CONFIG_CRYPTO_LZO=m
363# CONFIG_CRYPTO_ANSI_CPRNG is not set 363# CONFIG_CRYPTO_ANSI_CPRNG is not set
364# CONFIG_CRYPTO_HW is not set 364# CONFIG_CRYPTO_HW is not set
365CONFIG_VIRTUALIZATION=y
366CONFIG_KVM_BOOK3S_64=m
367CONFIG_KVM_BOOK3S_64_HV=y
368CONFIG_VHOST_NET=m
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index e2a4c26ad377..02e41b53488d 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -49,13 +49,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
49 int t; 49 int t;
50 50
51 __asm__ __volatile__( 51 __asm__ __volatile__(
52 PPC_RELEASE_BARRIER 52 PPC_ATOMIC_ENTRY_BARRIER
53"1: lwarx %0,0,%2 # atomic_add_return\n\ 53"1: lwarx %0,0,%2 # atomic_add_return\n\
54 add %0,%1,%0\n" 54 add %0,%1,%0\n"
55 PPC405_ERR77(0,%2) 55 PPC405_ERR77(0,%2)
56" stwcx. %0,0,%2 \n\ 56" stwcx. %0,0,%2 \n\
57 bne- 1b" 57 bne- 1b"
58 PPC_ACQUIRE_BARRIER 58 PPC_ATOMIC_EXIT_BARRIER
59 : "=&r" (t) 59 : "=&r" (t)
60 : "r" (a), "r" (&v->counter) 60 : "r" (a), "r" (&v->counter)
61 : "cc", "memory"); 61 : "cc", "memory");
@@ -85,13 +85,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
85 int t; 85 int t;
86 86
87 __asm__ __volatile__( 87 __asm__ __volatile__(
88 PPC_RELEASE_BARRIER 88 PPC_ATOMIC_ENTRY_BARRIER
89"1: lwarx %0,0,%2 # atomic_sub_return\n\ 89"1: lwarx %0,0,%2 # atomic_sub_return\n\
90 subf %0,%1,%0\n" 90 subf %0,%1,%0\n"
91 PPC405_ERR77(0,%2) 91 PPC405_ERR77(0,%2)
92" stwcx. %0,0,%2 \n\ 92" stwcx. %0,0,%2 \n\
93 bne- 1b" 93 bne- 1b"
94 PPC_ACQUIRE_BARRIER 94 PPC_ATOMIC_EXIT_BARRIER
95 : "=&r" (t) 95 : "=&r" (t)
96 : "r" (a), "r" (&v->counter) 96 : "r" (a), "r" (&v->counter)
97 : "cc", "memory"); 97 : "cc", "memory");
@@ -119,13 +119,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
119 int t; 119 int t;
120 120
121 __asm__ __volatile__( 121 __asm__ __volatile__(
122 PPC_RELEASE_BARRIER 122 PPC_ATOMIC_ENTRY_BARRIER
123"1: lwarx %0,0,%1 # atomic_inc_return\n\ 123"1: lwarx %0,0,%1 # atomic_inc_return\n\
124 addic %0,%0,1\n" 124 addic %0,%0,1\n"
125 PPC405_ERR77(0,%1) 125 PPC405_ERR77(0,%1)
126" stwcx. %0,0,%1 \n\ 126" stwcx. %0,0,%1 \n\
127 bne- 1b" 127 bne- 1b"
128 PPC_ACQUIRE_BARRIER 128 PPC_ATOMIC_EXIT_BARRIER
129 : "=&r" (t) 129 : "=&r" (t)
130 : "r" (&v->counter) 130 : "r" (&v->counter)
131 : "cc", "xer", "memory"); 131 : "cc", "xer", "memory");
@@ -163,13 +163,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
163 int t; 163 int t;
164 164
165 __asm__ __volatile__( 165 __asm__ __volatile__(
166 PPC_RELEASE_BARRIER 166 PPC_ATOMIC_ENTRY_BARRIER
167"1: lwarx %0,0,%1 # atomic_dec_return\n\ 167"1: lwarx %0,0,%1 # atomic_dec_return\n\
168 addic %0,%0,-1\n" 168 addic %0,%0,-1\n"
169 PPC405_ERR77(0,%1) 169 PPC405_ERR77(0,%1)
170" stwcx. %0,0,%1\n\ 170" stwcx. %0,0,%1\n\
171 bne- 1b" 171 bne- 1b"
172 PPC_ACQUIRE_BARRIER 172 PPC_ATOMIC_EXIT_BARRIER
173 : "=&r" (t) 173 : "=&r" (t)
174 : "r" (&v->counter) 174 : "r" (&v->counter)
175 : "cc", "xer", "memory"); 175 : "cc", "xer", "memory");
@@ -194,7 +194,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
194 int t; 194 int t;
195 195
196 __asm__ __volatile__ ( 196 __asm__ __volatile__ (
197 PPC_RELEASE_BARRIER 197 PPC_ATOMIC_ENTRY_BARRIER
198"1: lwarx %0,0,%1 # __atomic_add_unless\n\ 198"1: lwarx %0,0,%1 # __atomic_add_unless\n\
199 cmpw 0,%0,%3 \n\ 199 cmpw 0,%0,%3 \n\
200 beq- 2f \n\ 200 beq- 2f \n\
@@ -202,7 +202,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
202 PPC405_ERR77(0,%2) 202 PPC405_ERR77(0,%2)
203" stwcx. %0,0,%1 \n\ 203" stwcx. %0,0,%1 \n\
204 bne- 1b \n" 204 bne- 1b \n"
205 PPC_ACQUIRE_BARRIER 205 PPC_ATOMIC_EXIT_BARRIER
206" subf %0,%2,%0 \n\ 206" subf %0,%2,%0 \n\
2072:" 2072:"
208 : "=&r" (t) 208 : "=&r" (t)
@@ -226,7 +226,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
226 int t; 226 int t;
227 227
228 __asm__ __volatile__( 228 __asm__ __volatile__(
229 PPC_RELEASE_BARRIER 229 PPC_ATOMIC_ENTRY_BARRIER
230"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ 230"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
231 cmpwi %0,1\n\ 231 cmpwi %0,1\n\
232 addi %0,%0,-1\n\ 232 addi %0,%0,-1\n\
@@ -234,7 +234,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
234 PPC405_ERR77(0,%1) 234 PPC405_ERR77(0,%1)
235" stwcx. %0,0,%1\n\ 235" stwcx. %0,0,%1\n\
236 bne- 1b" 236 bne- 1b"
237 PPC_ACQUIRE_BARRIER 237 PPC_ATOMIC_EXIT_BARRIER
238 "\n\ 238 "\n\
2392:" : "=&b" (t) 2392:" : "=&b" (t)
240 : "r" (&v->counter) 240 : "r" (&v->counter)
@@ -285,12 +285,12 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
285 long t; 285 long t;
286 286
287 __asm__ __volatile__( 287 __asm__ __volatile__(
288 PPC_RELEASE_BARRIER 288 PPC_ATOMIC_ENTRY_BARRIER
289"1: ldarx %0,0,%2 # atomic64_add_return\n\ 289"1: ldarx %0,0,%2 # atomic64_add_return\n\
290 add %0,%1,%0\n\ 290 add %0,%1,%0\n\
291 stdcx. %0,0,%2 \n\ 291 stdcx. %0,0,%2 \n\
292 bne- 1b" 292 bne- 1b"
293 PPC_ACQUIRE_BARRIER 293 PPC_ATOMIC_EXIT_BARRIER
294 : "=&r" (t) 294 : "=&r" (t)
295 : "r" (a), "r" (&v->counter) 295 : "r" (a), "r" (&v->counter)
296 : "cc", "memory"); 296 : "cc", "memory");
@@ -319,12 +319,12 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
319 long t; 319 long t;
320 320
321 __asm__ __volatile__( 321 __asm__ __volatile__(
322 PPC_RELEASE_BARRIER 322 PPC_ATOMIC_ENTRY_BARRIER
323"1: ldarx %0,0,%2 # atomic64_sub_return\n\ 323"1: ldarx %0,0,%2 # atomic64_sub_return\n\
324 subf %0,%1,%0\n\ 324 subf %0,%1,%0\n\
325 stdcx. %0,0,%2 \n\ 325 stdcx. %0,0,%2 \n\
326 bne- 1b" 326 bne- 1b"
327 PPC_ACQUIRE_BARRIER 327 PPC_ATOMIC_EXIT_BARRIER
328 : "=&r" (t) 328 : "=&r" (t)
329 : "r" (a), "r" (&v->counter) 329 : "r" (a), "r" (&v->counter)
330 : "cc", "memory"); 330 : "cc", "memory");
@@ -351,12 +351,12 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
351 long t; 351 long t;
352 352
353 __asm__ __volatile__( 353 __asm__ __volatile__(
354 PPC_RELEASE_BARRIER 354 PPC_ATOMIC_ENTRY_BARRIER
355"1: ldarx %0,0,%1 # atomic64_inc_return\n\ 355"1: ldarx %0,0,%1 # atomic64_inc_return\n\
356 addic %0,%0,1\n\ 356 addic %0,%0,1\n\
357 stdcx. %0,0,%1 \n\ 357 stdcx. %0,0,%1 \n\
358 bne- 1b" 358 bne- 1b"
359 PPC_ACQUIRE_BARRIER 359 PPC_ATOMIC_EXIT_BARRIER
360 : "=&r" (t) 360 : "=&r" (t)
361 : "r" (&v->counter) 361 : "r" (&v->counter)
362 : "cc", "xer", "memory"); 362 : "cc", "xer", "memory");
@@ -393,12 +393,12 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
393 long t; 393 long t;
394 394
395 __asm__ __volatile__( 395 __asm__ __volatile__(
396 PPC_RELEASE_BARRIER 396 PPC_ATOMIC_ENTRY_BARRIER
397"1: ldarx %0,0,%1 # atomic64_dec_return\n\ 397"1: ldarx %0,0,%1 # atomic64_dec_return\n\
398 addic %0,%0,-1\n\ 398 addic %0,%0,-1\n\
399 stdcx. %0,0,%1\n\ 399 stdcx. %0,0,%1\n\
400 bne- 1b" 400 bne- 1b"
401 PPC_ACQUIRE_BARRIER 401 PPC_ATOMIC_EXIT_BARRIER
402 : "=&r" (t) 402 : "=&r" (t)
403 : "r" (&v->counter) 403 : "r" (&v->counter)
404 : "cc", "xer", "memory"); 404 : "cc", "xer", "memory");
@@ -418,13 +418,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
418 long t; 418 long t;
419 419
420 __asm__ __volatile__( 420 __asm__ __volatile__(
421 PPC_RELEASE_BARRIER 421 PPC_ATOMIC_ENTRY_BARRIER
422"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\ 422"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
423 addic. %0,%0,-1\n\ 423 addic. %0,%0,-1\n\
424 blt- 2f\n\ 424 blt- 2f\n\
425 stdcx. %0,0,%1\n\ 425 stdcx. %0,0,%1\n\
426 bne- 1b" 426 bne- 1b"
427 PPC_ACQUIRE_BARRIER 427 PPC_ATOMIC_EXIT_BARRIER
428 "\n\ 428 "\n\
4292:" : "=&r" (t) 4292:" : "=&r" (t)
430 : "r" (&v->counter) 430 : "r" (&v->counter)
@@ -450,14 +450,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
450 long t; 450 long t;
451 451
452 __asm__ __volatile__ ( 452 __asm__ __volatile__ (
453 PPC_RELEASE_BARRIER 453 PPC_ATOMIC_ENTRY_BARRIER
454"1: ldarx %0,0,%1 # __atomic_add_unless\n\ 454"1: ldarx %0,0,%1 # __atomic_add_unless\n\
455 cmpd 0,%0,%3 \n\ 455 cmpd 0,%0,%3 \n\
456 beq- 2f \n\ 456 beq- 2f \n\
457 add %0,%2,%0 \n" 457 add %0,%2,%0 \n"
458" stdcx. %0,0,%1 \n\ 458" stdcx. %0,0,%1 \n\
459 bne- 1b \n" 459 bne- 1b \n"
460 PPC_ACQUIRE_BARRIER 460 PPC_ATOMIC_EXIT_BARRIER
461" subf %0,%2,%0 \n\ 461" subf %0,%2,%0 \n\
4622:" 4622:"
463 : "=&r" (t) 463 : "=&r" (t)
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index e137afcc10fa..efdc92618b38 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -124,14 +124,14 @@ static __inline__ unsigned long fn( \
124 return (old & mask); \ 124 return (old & mask); \
125} 125}
126 126
127DEFINE_TESTOP(test_and_set_bits, or, PPC_RELEASE_BARRIER, 127DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
128 PPC_ACQUIRE_BARRIER, 0) 128 PPC_ATOMIC_EXIT_BARRIER, 0)
129DEFINE_TESTOP(test_and_set_bits_lock, or, "", 129DEFINE_TESTOP(test_and_set_bits_lock, or, "",
130 PPC_ACQUIRE_BARRIER, 1) 130 PPC_ACQUIRE_BARRIER, 1)
131DEFINE_TESTOP(test_and_clear_bits, andc, PPC_RELEASE_BARRIER, 131DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
132 PPC_ACQUIRE_BARRIER, 0) 132 PPC_ATOMIC_EXIT_BARRIER, 0)
133DEFINE_TESTOP(test_and_change_bits, xor, PPC_RELEASE_BARRIER, 133DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
134 PPC_ACQUIRE_BARRIER, 0) 134 PPC_ATOMIC_EXIT_BARRIER, 0)
135 135
136static __inline__ int test_and_set_bit(unsigned long nr, 136static __inline__ int test_and_set_bit(unsigned long nr,
137 volatile unsigned long *addr) 137 volatile unsigned long *addr)
diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
index 24bd34c57e9d..936a904ae78c 100644
--- a/arch/powerpc/include/asm/floppy.h
+++ b/arch/powerpc/include/asm/floppy.h
@@ -108,10 +108,10 @@ static int fd_request_irq(void)
108{ 108{
109 if (can_use_virtual_dma) 109 if (can_use_virtual_dma)
110 return request_irq(FLOPPY_IRQ, floppy_hardint, 110 return request_irq(FLOPPY_IRQ, floppy_hardint,
111 IRQF_DISABLED, "floppy", NULL); 111 0, "floppy", NULL);
112 else 112 else
113 return request_irq(FLOPPY_IRQ, floppy_interrupt, 113 return request_irq(FLOPPY_IRQ, floppy_interrupt,
114 IRQF_DISABLED, "floppy", NULL); 114 0, "floppy", NULL);
115} 115}
116 116
117static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) 117static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
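
The floppy.h hunk above drops IRQF_DISABLED, which by this point in the tree is a no-op that is being removed everywhere; ordinary handlers simply pass 0 for the flags. A hedged sketch of the resulting request_irq() call shape (the handler and IRQ number are hypothetical):

    #include <linux/interrupt.h>

    static irqreturn_t example_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int example_setup(unsigned int irq)
    {
            /* flags = 0: no IRQF_DISABLED, no sharing, default trigger */
            return request_irq(irq, example_handler, 0, "example", NULL);
    }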
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index c94e4a3fe2ef..2a9cf845473b 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -11,12 +11,13 @@
11 11
12#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 12#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
13 __asm__ __volatile ( \ 13 __asm__ __volatile ( \
14 PPC_RELEASE_BARRIER \ 14 PPC_ATOMIC_ENTRY_BARRIER \
15"1: lwarx %0,0,%2\n" \ 15"1: lwarx %0,0,%2\n" \
16 insn \ 16 insn \
17 PPC405_ERR77(0, %2) \ 17 PPC405_ERR77(0, %2) \
18"2: stwcx. %1,0,%2\n" \ 18"2: stwcx. %1,0,%2\n" \
19 "bne- 1b\n" \ 19 "bne- 1b\n" \
20 PPC_ATOMIC_EXIT_BARRIER \
20 "li %1,0\n" \ 21 "li %1,0\n" \
21"3: .section .fixup,\"ax\"\n" \ 22"3: .section .fixup,\"ax\"\n" \
22"4: li %1,%3\n" \ 23"4: li %1,%3\n" \
@@ -92,14 +93,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
92 return -EFAULT; 93 return -EFAULT;
93 94
94 __asm__ __volatile__ ( 95 __asm__ __volatile__ (
95 PPC_RELEASE_BARRIER 96 PPC_ATOMIC_ENTRY_BARRIER
96"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ 97"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
97 cmpw 0,%1,%4\n\ 98 cmpw 0,%1,%4\n\
98 bne- 3f\n" 99 bne- 3f\n"
99 PPC405_ERR77(0,%3) 100 PPC405_ERR77(0,%3)
100"2: stwcx. %5,0,%3\n\ 101"2: stwcx. %5,0,%3\n\
101 bne- 1b\n" 102 bne- 1b\n"
102 PPC_ACQUIRE_BARRIER 103 PPC_ATOMIC_EXIT_BARRIER
103"3: .section .fixup,\"ax\"\n\ 104"3: .section .fixup,\"ax\"\n\
1044: li %0,%6\n\ 1054: li %0,%6\n\
105 b 3b\n\ 106 b 3b\n\
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 08fe69edcd10..0ad432bc81d6 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -149,12 +149,6 @@ struct kvm_regs {
149#define KVM_SREGS_E_UPDATE_DBSR (1 << 3) 149#define KVM_SREGS_E_UPDATE_DBSR (1 << 3)
150 150
151/* 151/*
152 * Book3S special bits to indicate contents in the struct by maintaining
153 * backwards compatibility with older structs. If adding a new field,
154 * please make sure to add a flag for that new field */
155#define KVM_SREGS_S_HIOR (1 << 0)
156
157/*
158 * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a 152 * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a
159 * previous KVM_GET_REGS. 153 * previous KVM_GET_REGS.
160 * 154 *
@@ -179,8 +173,6 @@ struct kvm_sregs {
179 __u64 ibat[8]; 173 __u64 ibat[8];
180 __u64 dbat[8]; 174 __u64 dbat[8];
181 } ppc32; 175 } ppc32;
182 __u64 flags; /* KVM_SREGS_S_ */
183 __u64 hior;
184 } s; 176 } s;
185 struct { 177 struct {
186 union { 178 union {
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index a384ffdf33de..d4df013ad779 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -90,8 +90,6 @@ struct kvmppc_vcpu_book3s {
90#endif 90#endif
91 int context_id[SID_CONTEXTS]; 91 int context_id[SID_CONTEXTS];
92 92
93 bool hior_sregs; /* HIOR is set by SREGS, not PVR */
94
95 struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; 93 struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
96 struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG]; 94 struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
97 struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; 95 struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h
index 9cd5fc828a37..f77c708c67a0 100644
--- a/arch/powerpc/include/asm/lv1call.h
+++ b/arch/powerpc/include/asm/lv1call.h
@@ -316,7 +316,7 @@ LV1_CALL(gpu_context_free, 1, 0, 218 )
316LV1_CALL(gpu_context_iomap, 5, 0, 221 ) 316LV1_CALL(gpu_context_iomap, 5, 0, 221 )
317LV1_CALL(gpu_context_attribute, 6, 0, 225 ) 317LV1_CALL(gpu_context_attribute, 6, 0, 225 )
318LV1_CALL(gpu_context_intr, 1, 1, 227 ) 318LV1_CALL(gpu_context_intr, 1, 1, 227 )
319LV1_CALL(gpu_attribute, 5, 0, 228 ) 319LV1_CALL(gpu_attribute, 3, 0, 228 )
320LV1_CALL(get_rtc, 0, 2, 232 ) 320LV1_CALL(get_rtc, 0, 2, 232 )
321LV1_CALL(set_ppe_periodic_tracer_frequency, 1, 0, 240 ) 321LV1_CALL(set_ppe_periodic_tracer_frequency, 1, 0, 240 )
322LV1_CALL(start_ppe_periodic_tracer, 5, 0, 241 ) 322LV1_CALL(start_ppe_periodic_tracer, 5, 0, 241 )
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 28cdbd9f399c..03c48e819c8e 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -31,7 +31,7 @@
31 31
32#define MSR_ MSR_ME | MSR_CE 32#define MSR_ MSR_ME | MSR_CE
33#define MSR_KERNEL MSR_ | MSR_64BIT 33#define MSR_KERNEL MSR_ | MSR_64BIT
34#define MSR_USER32 MSR_ | MSR_PR | MSR_EE | MSR_DE 34#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
35#define MSR_USER64 MSR_USER32 | MSR_64BIT 35#define MSR_USER64 MSR_USER32 | MSR_64BIT
36#elif defined (CONFIG_40x) 36#elif defined (CONFIG_40x)
37#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE) 37#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 6fbce725c710..a0f358d4a00c 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,7 +8,7 @@
8 8
9#ifdef __powerpc64__ 9#ifdef __powerpc64__
10 10
11extern char _end[]; 11extern char __end_interrupts[];
12 12
13static inline int in_kernel_text(unsigned long addr) 13static inline int in_kernel_text(unsigned long addr)
14{ 14{
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index d7cab44643c5..e682a7143edb 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -13,6 +13,7 @@
13extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; 13extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
14extern void do_lwsync_fixups(unsigned long value, void *fixup_start, 14extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
15 void *fixup_end); 15 void *fixup_end);
16extern void do_final_fixups(void);
16 17
17static inline void eieio(void) 18static inline void eieio(void)
18{ 19{
@@ -41,11 +42,15 @@ static inline void isync(void)
41 START_LWSYNC_SECTION(97); \ 42 START_LWSYNC_SECTION(97); \
42 isync; \ 43 isync; \
43 MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup); 44 MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
44#define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER) 45#define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
45#define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n" 46#define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
47#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
48#define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
46#else 49#else
47#define PPC_ACQUIRE_BARRIER 50#define PPC_ACQUIRE_BARRIER
48#define PPC_RELEASE_BARRIER 51#define PPC_RELEASE_BARRIER
52#define PPC_ATOMIC_ENTRY_BARRIER
53#define PPC_ATOMIC_EXIT_BARRIER
49#endif 54#endif
50 55
51#endif /* __KERNEL__ */ 56#endif /* __KERNEL__ */
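
The synch.h hunk above introduces the two macros used by the atomic, bitops and futex changes in this series: PPC_ATOMIC_ENTRY_BARRIER expands to lwsync and PPC_ATOMIC_EXIT_BARRIER to a full sync, so that value-returning atomics behave as full memory barriers as the kernel's ordering rules require (lwsync alone does not order a prior store against a later load). A sketch of the guarantee a caller can then rely on; names prefixed example_ are hypothetical:

    #include <linux/atomic.h>

    static int example_data;
    static atomic_t example_seq = ATOMIC_INIT(0);

    void example_publish(int v)
    {
            example_data = v;
            /* A value-returning atomic acts as a full barrier: the store
             * above is ordered before the counter update, and the update
             * is ordered before anything that follows the call. */
            atomic_add_return(1, &example_seq);
    }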
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index bd6c401c0ee5..c48de98ba94e 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -15,8 +15,8 @@
15#define DEFAULT_PRIORITY 5 15#define DEFAULT_PRIORITY 5
16 16
17/* 17/*
18 * Mark IPIs as higher priority so we can take them inside interrupts that 18 * Mark IPIs as higher priority so we can take them inside interrupts
19 * arent marked IRQF_DISABLED 19 * FIXME: still true now?
20 */ 20 */
21#define IPI_PRIORITY 4 21#define IPI_PRIORITY 4
22 22
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 56212bc0ab08..4f80cf1ce77b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -215,7 +215,22 @@ reenable_mmu: /* re-enable mmu so we can */
215 stw r9,8(r1) 215 stw r9,8(r1)
216 stw r11,12(r1) 216 stw r11,12(r1)
217 stw r3,ORIG_GPR3(r1) 217 stw r3,ORIG_GPR3(r1)
218 /*
219	 * trace_hardirqs_off() uses CALLER_ADDR0 and CALLER_ADDR1.
220	 * When coming from user mode there is only one stack frame on the
221	 * stack, so accessing CALLER_ADDR1 would cause an oops. Create a
222	 * dummy stack frame to keep trace_hardirqs_off() happy.
223 */
224 andi. r12,r12,MSR_PR
225 beq 11f
226 stwu r1,-16(r1)
227 bl trace_hardirqs_off
228 addi r1,r1,16
229 b 12f
230
23111:
218 bl trace_hardirqs_off 232 bl trace_hardirqs_off
23312:
219 lwz r0,GPR0(r1) 234 lwz r0,GPR0(r1)
220 lwz r3,ORIG_GPR3(r1) 235 lwz r3,ORIG_GPR3(r1)
221 lwz r4,GPR4(r1) 236 lwz r4,GPR4(r1)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a54d92fec612..cf9c69b9189c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -267,7 +267,7 @@ vsx_unavailable_pSeries_1:
267 267
268#ifdef CONFIG_CBE_RAS 268#ifdef CONFIG_CBE_RAS
269 STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error) 269 STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
270 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202) 270 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
271#endif /* CONFIG_CBE_RAS */ 271#endif /* CONFIG_CBE_RAS */
272 272
273 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint) 273 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
@@ -275,7 +275,7 @@ vsx_unavailable_pSeries_1:
275 275
276#ifdef CONFIG_CBE_RAS 276#ifdef CONFIG_CBE_RAS
277 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance) 277 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
278 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602) 278 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
279#endif /* CONFIG_CBE_RAS */ 279#endif /* CONFIG_CBE_RAS */
280 280
281 STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist) 281 STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
@@ -283,7 +283,7 @@ vsx_unavailable_pSeries_1:
283 283
284#ifdef CONFIG_CBE_RAS 284#ifdef CONFIG_CBE_RAS
285 STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal) 285 STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
286 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802) 286 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
287#endif /* CONFIG_CBE_RAS */ 287#endif /* CONFIG_CBE_RAS */
288 288
289 . = 0x3000 289 . = 0x3000
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index 368d158d665d..a1ed8a8c7cb4 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -11,6 +11,7 @@
11#include <linux/jump_label.h> 11#include <linux/jump_label.h>
12#include <asm/code-patching.h> 12#include <asm/code-patching.h>
13 13
14#ifdef HAVE_JUMP_LABEL
14void arch_jump_label_transform(struct jump_entry *entry, 15void arch_jump_label_transform(struct jump_entry *entry,
15 enum jump_label_type type) 16 enum jump_label_type type)
16{ 17{
@@ -21,3 +22,4 @@ void arch_jump_label_transform(struct jump_entry *entry,
21 else 22 else
22 patch_instruction(addr, PPC_INST_NOP); 23 patch_instruction(addr, PPC_INST_NOP);
23} 24}
25#endif
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 35f27646c4ff..2985338d0e10 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -132,7 +132,6 @@ static void kvm_patch_ins_b(u32 *inst, int addr)
132 /* On relocatable kernels interrupts handlers and our code 132 /* On relocatable kernels interrupts handlers and our code
133 can be in different regions, so we don't patch them */ 133 can be in different regions, so we don't patch them */
134 134
135 extern u32 __end_interrupts;
136 if ((ulong)inst < (ulong)&__end_interrupts) 135 if ((ulong)inst < (ulong)&__end_interrupts)
137 return; 136 return;
138#endif 137#endif
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index f7d760ab5ca1..7cd07b42ca1a 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -738,7 +738,7 @@ relocate_new_kernel:
738 mr r5, r31 738 mr r5, r31
739 739
740 li r0, 0 740 li r0, 0
741#elif defined(CONFIG_44x) && !defined(CONFIG_47x) 741#elif defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
742 742
743/* 743/*
744 * Code for setting up 1:1 mapping for PPC440x for KEXEC 744 * Code for setting up 1:1 mapping for PPC440x for KEXEC
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9054ca9ab4f9..6457574c0b2f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -486,28 +486,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
486 new_thread = &new->thread; 486 new_thread = &new->thread;
487 old_thread = &current->thread; 487 old_thread = &current->thread;
488 488
489#if defined(CONFIG_PPC_BOOK3E_64)
490 /* XXX Current Book3E code doesn't deal with kernel side DBCR0,
491 * we always hold the user values, so we set it now.
492 *
493 * However, we ensure the kernel MSR:DE is appropriately cleared too
494 * to avoid spurrious single step exceptions in the kernel.
495 *
496 * This will have to change to merge with the ppc32 code at some point,
497 * but I don't like much what ppc32 is doing today so there's some
498 * thinking needed there
499 */
500 if ((new_thread->dbcr0 | old_thread->dbcr0) & DBCR0_IDM) {
501 u32 dbcr0;
502
503 mtmsr(mfmsr() & ~MSR_DE);
504 isync();
505 dbcr0 = mfspr(SPRN_DBCR0);
506 dbcr0 = (dbcr0 & DBCR0_EDM) | new_thread->dbcr0;
507 mtspr(SPRN_DBCR0, dbcr0);
508 }
509#endif /* CONFIG_PPC64_BOOK3E */
510
511#ifdef CONFIG_PPC64 489#ifdef CONFIG_PPC64
512 /* 490 /*
513 * Collect processor utilization data per process 491 * Collect processor utilization data per process
@@ -657,7 +635,7 @@ void show_regs(struct pt_regs * regs)
657 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) 635 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
658 printk("CFAR: "REG"\n", regs->orig_gpr3); 636 printk("CFAR: "REG"\n", regs->orig_gpr3);
659 if (trap == 0x300 || trap == 0x600) 637 if (trap == 0x300 || trap == 0x600)
660#ifdef CONFIG_PPC_ADV_DEBUG_REGS 638#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
661 printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); 639 printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
662#else 640#else
663 printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr); 641 printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index b4fa66127495..cc584865b3df 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1579,10 +1579,8 @@ static void __init prom_instantiate_rtas(void)
1579 return; 1579 return;
1580 1580
1581 base = alloc_down(size, PAGE_SIZE, 0); 1581 base = alloc_down(size, PAGE_SIZE, 0);
1582 if (base == 0) { 1582 if (base == 0)
1583 prom_printf("RTAS allocation failed !\n"); 1583 prom_panic("Could not allocate memory for RTAS\n");
1584 return;
1585 }
1586 1584
1587 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas")); 1585 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1588 if (!IHANDLE_VALID(rtas_inst)) { 1586 if (!IHANDLE_VALID(rtas_inst)) {
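
The prom_init.c hunk above turns a soft failure (print and return) into a hard stop, since boot cannot usefully continue without the RTAS allocation. A small sketch of the fail-fast idiom, with alloc_region() and die() as illustrative stand-ins rather than kernel functions:

#include <stdio.h>
#include <stdlib.h>

/* illustrative stand-in for a boot-time allocator that can fail */
static void *alloc_region(size_t size)
{
	return malloc(size);
}

/* fail fast: report and stop instead of returning into a half-set-up state */
static void die(const char *msg)
{
	fprintf(stderr, "%s\n", msg);
	exit(1);
}

int main(void)
{
	void *base = alloc_region(4096);

	if (!base)
		die("Could not allocate memory for RTAS");
	puts("allocation ok");
	free(base);
	return 0;
}
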
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index c1ce86357ecb..ac7610815113 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -107,6 +107,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
107 PTRRELOC(&__start___lwsync_fixup), 107 PTRRELOC(&__start___lwsync_fixup),
108 PTRRELOC(&__stop___lwsync_fixup)); 108 PTRRELOC(&__stop___lwsync_fixup));
109 109
110 do_final_fixups();
111
110 return KERNELBASE + offset; 112 return KERNELBASE + offset;
111} 113}
112 114
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 1a9dea80a69b..fb9bb46e7e88 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -359,6 +359,7 @@ void __init setup_system(void)
359 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); 359 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
360 do_lwsync_fixups(cur_cpu_spec->cpu_features, 360 do_lwsync_fixups(cur_cpu_spec->cpu_features,
361 &__start___lwsync_fixup, &__stop___lwsync_fixup); 361 &__start___lwsync_fixup, &__stop___lwsync_fixup);
362 do_final_fixups();
362 363
363 /* 364 /*
364 * Unflatten the device-tree passed by prom_init or kexec 365 * Unflatten the device-tree passed by prom_init or kexec
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 78b76dc54dfb..836a5a19eb2c 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -97,7 +97,7 @@ static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
97 compat_sigset_t cset; 97 compat_sigset_t cset;
98 98
99 switch (_NSIG_WORDS) { 99 switch (_NSIG_WORDS) {
100 case 4: cset.sig[5] = set->sig[3] & 0xffffffffull; 100 case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
101 cset.sig[7] = set->sig[3] >> 32; 101 cset.sig[7] = set->sig[3] >> 32;
102 case 3: cset.sig[4] = set->sig[2] & 0xffffffffull; 102 case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
103 cset.sig[5] = set->sig[2] >> 32; 103 cset.sig[5] = set->sig[2] >> 32;
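
The signal_32.c fix above corrects which compat word receives the low half of the fourth native sigset word. A standalone sketch of the 64-to-32-bit word split, showing why native sig[3] must land in compat words 6 and 7 (values here are arbitrary test data):

#include <stdint.h>
#include <stdio.h>

/* native word k maps to compat words 2k (low half) and 2k+1 (high half),
 * so set->sig[3] belongs in cset.sig[6]/cset.sig[7], not sig[5] */
int main(void)
{
	uint64_t sig[4] = { 0x1111111122222222ULL, 0, 0,
			    0xaaaaaaaabbbbbbbbULL };
	uint32_t cset[8];

	for (int k = 0; k < 4; k++) {
		cset[2 * k]     = (uint32_t)(sig[k] & 0xffffffffULL);
		cset[2 * k + 1] = (uint32_t)(sig[k] >> 32);
	}
	printf("cset[6]=%08x cset[7]=%08x\n",
	       (unsigned int)cset[6], (unsigned int)cset[7]);
	return 0;
}
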
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 25ddbfc7dd36..6df70907d60a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -187,7 +187,7 @@ int smp_request_message_ipi(int virq, int msg)
187 return 1; 187 return 1;
188 } 188 }
189#endif 189#endif
190 err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU, 190 err = request_irq(virq, smp_ipi_action[msg], IRQF_PERCPU,
191 smp_ipi_name[msg], 0); 191 smp_ipi_name[msg], 0);
192 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n", 192 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
193 virq, smp_ipi_name[msg], err); 193 virq, smp_ipi_name[msg], err);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4e5908264d1a..5459d148a0f6 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1298,14 +1298,12 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
1298 1298
1299 if (user_mode(regs)) { 1299 if (user_mode(regs)) {
1300 current->thread.dbcr0 &= ~DBCR0_IC; 1300 current->thread.dbcr0 &= ~DBCR0_IC;
1301#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1302 if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, 1301 if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
1303 current->thread.dbcr1)) 1302 current->thread.dbcr1))
1304 regs->msr |= MSR_DE; 1303 regs->msr |= MSR_DE;
1305 else 1304 else
1306 /* Make sure the IDM bit is off */ 1305 /* Make sure the IDM bit is off */
1307 current->thread.dbcr0 &= ~DBCR0_IDM; 1306 current->thread.dbcr0 &= ~DBCR0_IDM;
1308#endif
1309 } 1307 }
1310 1308
1311 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1309 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0cdbc07cec14..0cb137a9b038 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -44,6 +44,7 @@
44#include <asm/processor.h> 44#include <asm/processor.h>
45#include <asm/cputhreads.h> 45#include <asm/cputhreads.h>
46#include <asm/page.h> 46#include <asm/page.h>
47#include <asm/hvcall.h>
47#include <linux/gfp.h> 48#include <linux/gfp.h>
48#include <linux/sched.h> 49#include <linux/sched.h>
49#include <linux/vmalloc.h> 50#include <linux/vmalloc.h>
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index f422231d9235..44d8829334ab 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1263,7 +1263,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1263 addi r6,r5,VCORE_NAPPING_THREADS 1263 addi r6,r5,VCORE_NAPPING_THREADS
126431: lwarx r4,0,r6 126431: lwarx r4,0,r6
1265 or r4,r4,r0 1265 or r4,r4,r0
1266 popcntw r7,r4 1266 PPC_POPCNTW(r7,r4)
1267 cmpw r7,r8 1267 cmpw r7,r8
1268 bge 2f 1268 bge 2f
1269 stwcx. r4,0,r6 1269 stwcx. r4,0,r6
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index bc4d50dec78b..3c791e1eb675 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -151,16 +151,14 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
151#ifdef CONFIG_PPC_BOOK3S_64 151#ifdef CONFIG_PPC_BOOK3S_64
152 if ((pvr >= 0x330000) && (pvr < 0x70330000)) { 152 if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
153 kvmppc_mmu_book3s_64_init(vcpu); 153 kvmppc_mmu_book3s_64_init(vcpu);
154 if (!to_book3s(vcpu)->hior_sregs) 154 to_book3s(vcpu)->hior = 0xfff00000;
155 to_book3s(vcpu)->hior = 0xfff00000;
156 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; 155 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
157 vcpu->arch.cpu_type = KVM_CPU_3S_64; 156 vcpu->arch.cpu_type = KVM_CPU_3S_64;
158 } else 157 } else
159#endif 158#endif
160 { 159 {
161 kvmppc_mmu_book3s_32_init(vcpu); 160 kvmppc_mmu_book3s_32_init(vcpu);
162 if (!to_book3s(vcpu)->hior_sregs) 161 to_book3s(vcpu)->hior = 0;
163 to_book3s(vcpu)->hior = 0;
164 to_book3s(vcpu)->msr_mask = 0xffffffffULL; 162 to_book3s(vcpu)->msr_mask = 0xffffffffULL;
165 vcpu->arch.cpu_type = KVM_CPU_3S_32; 163 vcpu->arch.cpu_type = KVM_CPU_3S_32;
166 } 164 }
@@ -797,9 +795,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
797 } 795 }
798 } 796 }
799 797
800 if (sregs->u.s.flags & KVM_SREGS_S_HIOR)
801 sregs->u.s.hior = to_book3s(vcpu)->hior;
802
803 return 0; 798 return 0;
804} 799}
805 800
@@ -836,11 +831,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
836 /* Flush the MMU after messing with the segments */ 831 /* Flush the MMU after messing with the segments */
837 kvmppc_mmu_pte_flush(vcpu, 0, 0); 832 kvmppc_mmu_pte_flush(vcpu, 0, 0);
838 833
839 if (sregs->u.s.flags & KVM_SREGS_S_HIOR) {
840 to_book3s(vcpu)->hior_sregs = true;
841 to_book3s(vcpu)->hior = sregs->u.s.hior;
842 }
843
844 return 0; 834 return 0;
845} 835}
846 836
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index efbf9ad87203..607fbdf24b84 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -208,7 +208,6 @@ int kvm_dev_ioctl_check_extension(long ext)
208 case KVM_CAP_PPC_BOOKE_SREGS: 208 case KVM_CAP_PPC_BOOKE_SREGS:
209#else 209#else
210 case KVM_CAP_PPC_SEGSTATE: 210 case KVM_CAP_PPC_SEGSTATE:
211 case KVM_CAP_PPC_HIOR:
212 case KVM_CAP_PPC_PAPR: 211 case KVM_CAP_PPC_PAPR:
213#endif 212#endif
214 case KVM_CAP_PPC_UNSET_IRQ: 213 case KVM_CAP_PPC_UNSET_IRQ:
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 0d08d0171392..7a8a7487cee8 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -18,6 +18,8 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <asm/cputable.h> 19#include <asm/cputable.h>
20#include <asm/code-patching.h> 20#include <asm/code-patching.h>
21#include <asm/page.h>
22#include <asm/sections.h>
21 23
22 24
23struct fixup_entry { 25struct fixup_entry {
@@ -128,6 +130,27 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
128 } 130 }
129} 131}
130 132
133void do_final_fixups(void)
134{
135#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
136 int *src, *dest;
137 unsigned long length;
138
139 if (PHYSICAL_START == 0)
140 return;
141
142 src = (int *)(KERNELBASE + PHYSICAL_START);
143 dest = (int *)KERNELBASE;
144 length = (__end_interrupts - _stext) / sizeof(int);
145
146 while (length--) {
147 patch_instruction(dest, *src);
148 src++;
149 dest++;
150 }
151#endif
152}
153
131#ifdef CONFIG_FTR_FIXUP_SELFTEST 154#ifdef CONFIG_FTR_FIXUP_SELFTEST
132 155
133#define check(x) \ 156#define check(x) \
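
do_final_fixups() above copies the low-memory exception vectors word by word from the relocated image down to the kernel base. A hedged userspace sketch of the same copy loop, with plain buffers standing in for the KERNELBASE/PHYSICAL_START regions and a plain store standing in for patch_instruction():

#include <stdio.h>

/* copy a span of 32-bit instruction words from src to dest, mirroring the
 * while (length--) loop in do_final_fixups() */
static void copy_words(int *dest, const int *src, unsigned long nwords)
{
	while (nwords--)
		*dest++ = *src++;
}

int main(void)
{
	int relocated[4] = { 1, 2, 3, 4 };	/* stands in for the copy source */
	int base[4] = { 0 };			/* stands in for the destination */

	copy_words(base, relocated, sizeof(relocated) / sizeof(int));
	printf("%d %d %d %d\n", base[0], base[1], base[2], base[3]);
	return 0;
}
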
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5964371303ac..8558b572e55d 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -15,6 +15,7 @@
15#include <linux/of_fdt.h> 15#include <linux/of_fdt.h>
16#include <linux/memblock.h> 16#include <linux/memblock.h>
17#include <linux/bootmem.h> 17#include <linux/bootmem.h>
18#include <linux/moduleparam.h>
18#include <asm/pgtable.h> 19#include <asm/pgtable.h>
19#include <asm/pgalloc.h> 20#include <asm/pgalloc.h>
20#include <asm/tlb.h> 21#include <asm/tlb.h>
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 16da595ff402..2dd6bdd31fe1 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -34,6 +34,7 @@
34#include <linux/suspend.h> 34#include <linux/suspend.h>
35#include <linux/memblock.h> 35#include <linux/memblock.h>
36#include <linux/hugetlb.h> 36#include <linux/hugetlb.h>
37#include <linux/slab.h>
37 38
38#include <asm/pgalloc.h> 39#include <asm/pgalloc.h>
39#include <asm/prom.h> 40#include <asm/prom.h>
@@ -555,3 +556,32 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
555 book3e_hugetlb_preload(vma->vm_mm, address, *ptep); 556 book3e_hugetlb_preload(vma->vm_mm, address, *ptep);
556#endif 557#endif
557} 558}
559
560/*
561 * System memory should not be in /proc/iomem but various tools expect it
562 * (eg kdump).
563 */
564static int add_system_ram_resources(void)
565{
566 struct memblock_region *reg;
567
568 for_each_memblock(memory, reg) {
569 struct resource *res;
570 unsigned long base = reg->base;
571 unsigned long size = reg->size;
572
573 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
574 WARN_ON(!res);
575
576 if (res) {
577 res->name = "System RAM";
578 res->start = base;
579 res->end = base + size - 1;
580 res->flags = IORESOURCE_MEM;
581 WARN_ON(request_resource(&iomem_resource, res) < 0);
582 }
583 }
584
585 return 0;
586}
587subsys_initcall(add_system_ram_resources);
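
add_system_ram_resources() above publishes each memblock region as a "System RAM" entry; note that the range is inclusive, hence end = base + size - 1. A tiny sketch of that bookkeeping with an illustrative region type in place of struct resource:

#include <stdio.h>

/* illustrative descriptor; the kernel's struct resource also uses an
 * inclusive [start, end] range */
struct region {
	unsigned long start;
	unsigned long end;
};

int main(void)
{
	unsigned long base = 0x100000UL, size = 0x40000UL;
	struct region res = { .start = base, .end = base + size - 1 };

	printf("System RAM: %#lx-%#lx\n", res.start, res.end);
	return 0;
}
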
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index c7dd4dec4df8..b22a83a91cb8 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -315,7 +315,10 @@ static int __init find_min_common_depth(void)
315 struct device_node *root; 315 struct device_node *root;
316 const char *vec5; 316 const char *vec5;
317 317
318 root = of_find_node_by_path("/rtas"); 318 if (firmware_has_feature(FW_FEATURE_OPAL))
319 root = of_find_node_by_path("/ibm,opal");
320 else
321 root = of_find_node_by_path("/rtas");
319 if (!root) 322 if (!root)
320 root = of_find_node_by_path("/"); 323 root = of_find_node_by_path("/");
321 324
@@ -344,12 +347,19 @@ static int __init find_min_common_depth(void)
344 347
345#define VEC5_AFFINITY_BYTE 5 348#define VEC5_AFFINITY_BYTE 5
346#define VEC5_AFFINITY 0x80 349#define VEC5_AFFINITY 0x80
347 chosen = of_find_node_by_path("/chosen"); 350
348 if (chosen) { 351 if (firmware_has_feature(FW_FEATURE_OPAL))
349 vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL); 352 form1_affinity = 1;
350 if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) { 353 else {
351 dbg("Using form 1 affinity\n"); 354 chosen = of_find_node_by_path("/chosen");
352 form1_affinity = 1; 355 if (chosen) {
356 vec5 = of_get_property(chosen,
357 "ibm,architecture-vec-5", NULL);
358 if (vec5 && (vec5[VEC5_AFFINITY_BYTE] &
359 VEC5_AFFINITY)) {
360 dbg("Using form 1 affinity\n");
361 form1_affinity = 1;
362 }
353 } 363 }
354 } 364 }
355 365
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index e36d6e232ae6..846b789fb195 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -50,6 +50,7 @@ static void __init mpc5200_simple_setup_arch(void)
50 50
51/* list of the supported boards */ 51/* list of the supported boards */
52static const char *board[] __initdata = { 52static const char *board[] __initdata = {
53 "anon,charon",
53 "intercontrol,digsy-mtc", 54 "intercontrol,digsy-mtc",
54 "manroland,mucmc52", 55 "manroland,mucmc52",
55 "manroland,uc101", 56 "manroland,uc101",
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 45023e26aea3..d7946be298b6 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -203,7 +203,7 @@ config P3060_QDS
203 select PPC_E500MC 203 select PPC_E500MC
204 select PHYS_64BIT 204 select PHYS_64BIT
205 select SWIOTLB 205 select SWIOTLB
206 select MPC8xxx_GPIO 206 select GPIO_MPC8XXX
207 select HAS_RAPIDIO 207 select HAS_RAPIDIO
208 select PPC_EPAPR_HV_PIC 208 select PPC_EPAPR_HV_PIC
209 help 209 help
diff --git a/arch/powerpc/platforms/85xx/p3060_qds.c b/arch/powerpc/platforms/85xx/p3060_qds.c
index 01dcf44871e9..081cf4ac1881 100644
--- a/arch/powerpc/platforms/85xx/p3060_qds.c
+++ b/arch/powerpc/platforms/85xx/p3060_qds.c
@@ -70,7 +70,7 @@ define_machine(p3060_qds) {
70 .power_save = e500_idle, 70 .power_save = e500_idle,
71}; 71};
72 72
73machine_device_initcall(p3060_qds, declare_of_platform_devices); 73machine_device_initcall(p3060_qds, corenet_ds_publish_devices);
74 74
75#ifdef CONFIG_SWIOTLB 75#ifdef CONFIG_SWIOTLB
76machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier); 76machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index e4588721ef34..3fe6d927ad70 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -347,7 +347,7 @@ config SIMPLE_GPIO
347 347
348config MCU_MPC8349EMITX 348config MCU_MPC8349EMITX
349 bool "MPC8349E-mITX MCU driver" 349 bool "MPC8349E-mITX MCU driver"
350 depends on I2C && PPC_83xx 350 depends on I2C=y && PPC_83xx
351 select GENERIC_GPIO 351 select GENERIC_GPIO
352 select ARCH_REQUIRE_GPIOLIB 352 select ARCH_REQUIRE_GPIOLIB
353 help 353 help
diff --git a/arch/powerpc/platforms/cell/beat.c b/arch/powerpc/platforms/cell/beat.c
index 232fc384e855..852592b2b712 100644
--- a/arch/powerpc/platforms/cell/beat.c
+++ b/arch/powerpc/platforms/cell/beat.c
@@ -230,7 +230,7 @@ static int __init beat_register_event(void)
230 } 230 }
231 ev->virq = virq; 231 ev->virq = virq;
232 232
233 rc = request_irq(virq, ev->handler, IRQF_DISABLED, 233 rc = request_irq(virq, ev->handler, 0,
234 ev->typecode, NULL); 234 ev->typecode, NULL);
235 if (rc != 0) { 235 if (rc != 0) {
236 printk(KERN_ERR "Beat: failed to request virtual IRQ" 236 printk(KERN_ERR "Beat: failed to request virtual IRQ"
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index ae790ac4a589..14be2bd358b8 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -514,7 +514,7 @@ static __init int celleb_setup_pciex(struct device_node *node,
514 virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 514 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
515 oirq.size); 515 oirq.size);
516 if (request_irq(virq, pciex_handle_internal_irq, 516 if (request_irq(virq, pciex_handle_internal_irq,
517 IRQF_DISABLED, "pciex", (void *)phb)) { 517 0, "pciex", (void *)phb)) {
518 pr_err("PCIEXC:Failed to request irq\n"); 518 pr_err("PCIEXC:Failed to request irq\n");
519 goto error; 519 goto error;
520 } 520 }
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index fc46fcac3921..592c3d51b817 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -412,8 +412,7 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
412 IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT)); 412 IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
413 BUG_ON(virq == NO_IRQ); 413 BUG_ON(virq == NO_IRQ);
414 414
415 ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED, 415 ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
416 iommu->name, iommu);
417 BUG_ON(ret); 416 BUG_ON(ret);
418 417
419 /* set the IOC segment table origin register (and turn on the iommu) */ 418 /* set the IOC segment table origin register (and turn on the iommu) */
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index 1acf36010423..59c1a1694104 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -392,7 +392,7 @@ static int __init cbe_init_pm_irq(void)
392 } 392 }
393 393
394 rc = request_irq(irq, cbe_pm_irq, 394 rc = request_irq(irq, cbe_pm_irq,
395 IRQF_DISABLED, "cbe-pmu-0", NULL); 395 0, "cbe-pmu-0", NULL);
396 if (rc) { 396 if (rc) {
397 printk("ERROR: Request for irq on node %d failed\n", 397 printk("ERROR: Request for irq on node %d failed\n",
398 node); 398 node);
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 3675da73623f..e94d3ecdd8bb 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -442,8 +442,7 @@ static int spu_request_irqs(struct spu *spu)
442 snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", 442 snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
443 spu->number); 443 spu->number);
444 ret = request_irq(spu->irqs[0], spu_irq_class_0, 444 ret = request_irq(spu->irqs[0], spu_irq_class_0,
445 IRQF_DISABLED, 445 0, spu->irq_c0, spu);
446 spu->irq_c0, spu);
447 if (ret) 446 if (ret)
448 goto bail0; 447 goto bail0;
449 } 448 }
@@ -451,8 +450,7 @@ static int spu_request_irqs(struct spu *spu)
451 snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", 450 snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
452 spu->number); 451 spu->number);
453 ret = request_irq(spu->irqs[1], spu_irq_class_1, 452 ret = request_irq(spu->irqs[1], spu_irq_class_1,
454 IRQF_DISABLED, 453 0, spu->irq_c1, spu);
455 spu->irq_c1, spu);
456 if (ret) 454 if (ret)
457 goto bail1; 455 goto bail1;
458 } 456 }
@@ -460,8 +458,7 @@ static int spu_request_irqs(struct spu *spu)
460 snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", 458 snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
461 spu->number); 459 spu->number);
462 ret = request_irq(spu->irqs[2], spu_irq_class_2, 460 ret = request_irq(spu->irqs[2], spu_irq_class_2,
463 IRQF_DISABLED, 461 0, spu->irq_c2, spu);
464 spu->irq_c2, spu);
465 if (ret) 462 if (ret)
466 goto bail2; 463 goto bail2;
467 } 464 }
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index cb40e921a565..901bfbddc3dd 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -272,7 +272,6 @@ static struct irqaction xmon_action = {
272 272
273static struct irqaction gatwick_cascade_action = { 273static struct irqaction gatwick_cascade_action = {
274 .handler = gatwick_action, 274 .handler = gatwick_action,
275 .flags = IRQF_DISABLED,
276 .name = "cascade", 275 .name = "cascade",
277}; 276};
278 277
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 9a521dc8e485..9b6a820bdd7d 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -200,7 +200,7 @@ static int psurge_secondary_ipi_init(void)
200 200
201 if (psurge_secondary_virq) 201 if (psurge_secondary_virq)
202 rc = request_irq(psurge_secondary_virq, psurge_ipi_intr, 202 rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
203 IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); 203 IRQF_PERCPU, "IPI", NULL);
204 204
205 if (rc) 205 if (rc)
206 pr_err("Failed to setup secondary cpu IPI\n"); 206 pr_err("Failed to setup secondary cpu IPI\n");
@@ -408,7 +408,7 @@ static int __init smp_psurge_kick_cpu(int nr)
408 408
409static struct irqaction psurge_irqaction = { 409static struct irqaction psurge_irqaction = {
410 .handler = psurge_ipi_intr, 410 .handler = psurge_ipi_intr,
411 .flags = IRQF_DISABLED|IRQF_PERCPU, 411 .flags = IRQF_PERCPU,
412 .name = "primary IPI", 412 .name = "primary IPI",
413}; 413};
414 414
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index 6c4b5837fc8a..3f175e8aedb4 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -825,7 +825,7 @@ static int ps3_probe_thread(void *data)
825 825
826 spin_lock_init(&dev.lock); 826 spin_lock_init(&dev.lock);
827 827
828 res = request_irq(irq, ps3_notification_interrupt, IRQF_DISABLED, 828 res = request_irq(irq, ps3_notification_interrupt, 0,
829 "ps3_notification", &dev); 829 "ps3_notification", &dev);
830 if (res) { 830 if (res) {
831 pr_err("%s:%u: request_irq failed %d\n", __func__, __LINE__, 831 pr_err("%s:%u: request_irq failed %d\n", __func__, __LINE__,
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 404bc52b7806..1d6f4f478fe2 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -88,6 +88,7 @@ struct ps3_private {
88 struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN))); 88 struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
89 u64 ppe_id; 89 u64 ppe_id;
90 u64 thread_id; 90 u64 thread_id;
91 unsigned long ipi_mask;
91}; 92};
92 93
93static DEFINE_PER_CPU(struct ps3_private, ps3_private); 94static DEFINE_PER_CPU(struct ps3_private, ps3_private);
@@ -144,7 +145,11 @@ static void ps3_chip_unmask(struct irq_data *d)
144static void ps3_chip_eoi(struct irq_data *d) 145static void ps3_chip_eoi(struct irq_data *d)
145{ 146{
146 const struct ps3_private *pd = irq_data_get_irq_chip_data(d); 147 const struct ps3_private *pd = irq_data_get_irq_chip_data(d);
147 lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq); 148
149 /* non-IPIs are EOIed here. */
150
151 if (!test_bit(63 - d->irq, &pd->ipi_mask))
152 lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
148} 153}
149 154
150/** 155/**
@@ -691,6 +696,16 @@ void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
691 cpu, virq, pd->bmp.ipi_debug_brk_mask); 696 cpu, virq, pd->bmp.ipi_debug_brk_mask);
692} 697}
693 698
699void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
700{
701 struct ps3_private *pd = &per_cpu(ps3_private, cpu);
702
703 set_bit(63 - virq, &pd->ipi_mask);
704
705 DBG("%s:%d: cpu %u, virq %u, ipi_mask %lxh\n", __func__, __LINE__,
706 cpu, virq, pd->ipi_mask);
707}
708
694static unsigned int ps3_get_irq(void) 709static unsigned int ps3_get_irq(void)
695{ 710{
696 struct ps3_private *pd = &__get_cpu_var(ps3_private); 711 struct ps3_private *pd = &__get_cpu_var(ps3_private);
@@ -720,6 +735,12 @@ static unsigned int ps3_get_irq(void)
720 BUG(); 735 BUG();
721 } 736 }
722#endif 737#endif
738
739 /* IPIs are EOIed here. */
740
741 if (test_bit(63 - plug, &pd->ipi_mask))
742 lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, plug);
743
723 return plug; 744 return plug;
724} 745}
725 746
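
The PS3 interrupt hunks above track IPIs in ipi_mask using MSB-first numbering: plug N occupies bit (63 - N) of the 64-bit mask, matching the set_bit/test_bit calls in the diff. A standalone sketch of that bit layout (helper names here are illustrative):

#include <stdint.h>
#include <stdio.h>

static void set_plug(uint64_t *mask, unsigned int plug)
{
	*mask |= 1ULL << (63 - plug);	/* plug 0 is the most significant bit */
}

static int test_plug(uint64_t mask, unsigned int plug)
{
	return (mask >> (63 - plug)) & 1;
}

int main(void)
{
	uint64_t ipi_mask = 0;

	set_plug(&ipi_mask, 3);
	printf("mask=%016llx plug3=%d plug4=%d\n",
	       (unsigned long long)ipi_mask,
	       test_plug(ipi_mask, 3), test_plug(ipi_mask, 4));
	return 0;
}
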
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
index 9a196a88eda7..1a633ed0fe98 100644
--- a/arch/powerpc/platforms/ps3/platform.h
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -43,6 +43,7 @@ void ps3_mm_shutdown(void);
43void ps3_init_IRQ(void); 43void ps3_init_IRQ(void);
44void ps3_shutdown_IRQ(int cpu); 44void ps3_shutdown_IRQ(int cpu);
45void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq); 45void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
46void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq);
46 47
47/* smp */ 48/* smp */
48 49
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
index 5e304c292f68..ca40f6afd35d 100644
--- a/arch/powerpc/platforms/ps3/repository.c
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -184,7 +184,7 @@ int ps3_repository_read_bus_type(unsigned int bus_index,
184 enum ps3_bus_type *bus_type) 184 enum ps3_bus_type *bus_type)
185{ 185{
186 int result; 186 int result;
187 u64 v1; 187 u64 v1 = 0;
188 188
189 result = read_node(PS3_LPAR_ID_PME, 189 result = read_node(PS3_LPAR_ID_PME,
190 make_first_field("bus", bus_index), 190 make_first_field("bus", bus_index),
@@ -199,7 +199,7 @@ int ps3_repository_read_bus_num_dev(unsigned int bus_index,
199 unsigned int *num_dev) 199 unsigned int *num_dev)
200{ 200{
201 int result; 201 int result;
202 u64 v1; 202 u64 v1 = 0;
203 203
204 result = read_node(PS3_LPAR_ID_PME, 204 result = read_node(PS3_LPAR_ID_PME,
205 make_first_field("bus", bus_index), 205 make_first_field("bus", bus_index),
@@ -239,7 +239,7 @@ int ps3_repository_read_dev_type(unsigned int bus_index,
239 unsigned int dev_index, enum ps3_dev_type *dev_type) 239 unsigned int dev_index, enum ps3_dev_type *dev_type)
240{ 240{
241 int result; 241 int result;
242 u64 v1; 242 u64 v1 = 0;
243 243
244 result = read_node(PS3_LPAR_ID_PME, 244 result = read_node(PS3_LPAR_ID_PME,
245 make_first_field("bus", bus_index), 245 make_first_field("bus", bus_index),
@@ -256,8 +256,8 @@ int ps3_repository_read_dev_intr(unsigned int bus_index,
256 enum ps3_interrupt_type *intr_type, unsigned int *interrupt_id) 256 enum ps3_interrupt_type *intr_type, unsigned int *interrupt_id)
257{ 257{
258 int result; 258 int result;
259 u64 v1; 259 u64 v1 = 0;
260 u64 v2; 260 u64 v2 = 0;
261 261
262 result = read_node(PS3_LPAR_ID_PME, 262 result = read_node(PS3_LPAR_ID_PME,
263 make_first_field("bus", bus_index), 263 make_first_field("bus", bus_index),
@@ -275,7 +275,7 @@ int ps3_repository_read_dev_reg_type(unsigned int bus_index,
275 enum ps3_reg_type *reg_type) 275 enum ps3_reg_type *reg_type)
276{ 276{
277 int result; 277 int result;
278 u64 v1; 278 u64 v1 = 0;
279 279
280 result = read_node(PS3_LPAR_ID_PME, 280 result = read_node(PS3_LPAR_ID_PME,
281 make_first_field("bus", bus_index), 281 make_first_field("bus", bus_index),
@@ -615,7 +615,7 @@ int ps3_repository_read_stor_dev_num_regions(unsigned int bus_index,
615 unsigned int dev_index, unsigned int *num_regions) 615 unsigned int dev_index, unsigned int *num_regions)
616{ 616{
617 int result; 617 int result;
618 u64 v1; 618 u64 v1 = 0;
619 619
620 result = read_node(PS3_LPAR_ID_PME, 620 result = read_node(PS3_LPAR_ID_PME,
621 make_first_field("bus", bus_index), 621 make_first_field("bus", bus_index),
@@ -631,7 +631,7 @@ int ps3_repository_read_stor_dev_region_id(unsigned int bus_index,
631 unsigned int *region_id) 631 unsigned int *region_id)
632{ 632{
633 int result; 633 int result;
634 u64 v1; 634 u64 v1 = 0;
635 635
636 result = read_node(PS3_LPAR_ID_PME, 636 result = read_node(PS3_LPAR_ID_PME,
637 make_first_field("bus", bus_index), 637 make_first_field("bus", bus_index),
@@ -786,7 +786,7 @@ int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, u64 *region_total)
786int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved) 786int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved)
787{ 787{
788 int result; 788 int result;
789 u64 v1; 789 u64 v1 = 0;
790 790
791 result = read_node(PS3_LPAR_ID_CURRENT, 791 result = read_node(PS3_LPAR_ID_CURRENT,
792 make_first_field("bi", 0), 792 make_first_field("bi", 0),
@@ -805,7 +805,7 @@ int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved)
805int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id) 805int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id)
806{ 806{
807 int result; 807 int result;
808 u64 v1; 808 u64 v1 = 0;
809 809
810 result = read_node(PS3_LPAR_ID_CURRENT, 810 result = read_node(PS3_LPAR_ID_CURRENT,
811 make_first_field("bi", 0), 811 make_first_field("bi", 0),
@@ -827,8 +827,8 @@ int ps3_repository_read_spu_resource_id(unsigned int res_index,
827 enum ps3_spu_resource_type *resource_type, unsigned int *resource_id) 827 enum ps3_spu_resource_type *resource_type, unsigned int *resource_id)
828{ 828{
829 int result; 829 int result;
830 u64 v1; 830 u64 v1 = 0;
831 u64 v2; 831 u64 v2 = 0;
832 832
833 result = read_node(PS3_LPAR_ID_CURRENT, 833 result = read_node(PS3_LPAR_ID_CURRENT,
834 make_first_field("bi", 0), 834 make_first_field("bi", 0),
@@ -854,7 +854,7 @@ static int ps3_repository_read_boot_dat_address(u64 *address)
854int ps3_repository_read_boot_dat_size(unsigned int *size) 854int ps3_repository_read_boot_dat_size(unsigned int *size)
855{ 855{
856 int result; 856 int result;
857 u64 v1; 857 u64 v1 = 0;
858 858
859 result = read_node(PS3_LPAR_ID_CURRENT, 859 result = read_node(PS3_LPAR_ID_CURRENT,
860 make_first_field("bi", 0), 860 make_first_field("bi", 0),
@@ -869,7 +869,7 @@ int ps3_repository_read_boot_dat_size(unsigned int *size)
869int ps3_repository_read_vuart_av_port(unsigned int *port) 869int ps3_repository_read_vuart_av_port(unsigned int *port)
870{ 870{
871 int result; 871 int result;
872 u64 v1; 872 u64 v1 = 0;
873 873
874 result = read_node(PS3_LPAR_ID_CURRENT, 874 result = read_node(PS3_LPAR_ID_CURRENT,
875 make_first_field("bi", 0), 875 make_first_field("bi", 0),
@@ -884,7 +884,7 @@ int ps3_repository_read_vuart_av_port(unsigned int *port)
884int ps3_repository_read_vuart_sysmgr_port(unsigned int *port) 884int ps3_repository_read_vuart_sysmgr_port(unsigned int *port)
885{ 885{
886 int result; 886 int result;
887 u64 v1; 887 u64 v1 = 0;
888 888
889 result = read_node(PS3_LPAR_ID_CURRENT, 889 result = read_node(PS3_LPAR_ID_CURRENT,
890 make_first_field("bi", 0), 890 make_first_field("bi", 0),
@@ -919,7 +919,7 @@ int ps3_repository_read_boot_dat_info(u64 *lpar_addr, unsigned int *size)
919int ps3_repository_read_num_be(unsigned int *num_be) 919int ps3_repository_read_num_be(unsigned int *num_be)
920{ 920{
921 int result; 921 int result;
922 u64 v1; 922 u64 v1 = 0;
923 923
924 result = read_node(PS3_LPAR_ID_PME, 924 result = read_node(PS3_LPAR_ID_PME,
925 make_first_field("ben", 0), 925 make_first_field("ben", 0),
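
The repository.c hunks above initialize the v1/v2 output variables to 0 because a failed lookup may leave them unwritten, and the caller must not read an indeterminate value. A small sketch of the pattern; read_entry() is an illustrative stand-in, not the real read_node():

#include <stdint.h>
#include <stdio.h>

static int read_entry(int key, uint64_t *out)
{
	if (key != 42)
		return -1;	/* error path: *out left unwritten */
	*out = 0xdeadbeefULL;
	return 0;
}

int main(void)
{
	uint64_t v1 = 0;	/* defined value even if the lookup fails */

	if (read_entry(7, &v1))
		printf("lookup failed, v1 stays %llu\n",
		       (unsigned long long)v1);
	return 0;
}
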
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 4c44794faac0..efc1cd8c034a 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -59,46 +59,49 @@ static void ps3_smp_message_pass(int cpu, int msg)
59 59
60static int ps3_smp_probe(void) 60static int ps3_smp_probe(void)
61{ 61{
62 return 2; 62 int cpu;
63}
64 63
65static void __init ps3_smp_setup_cpu(int cpu) 64 for (cpu = 0; cpu < 2; cpu++) {
66{ 65 int result;
67 int result; 66 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
68 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu); 67 int i;
69 int i;
70 68
71 DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu); 69 DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
72 70
73 /* 71 /*
74 * Check assumptions on ps3_ipi_virqs[] indexing. If this 72 * Check assumptions on ps3_ipi_virqs[] indexing. If this
75 * check fails, then a different mapping of PPC_MSG_ 73 * check fails, then a different mapping of PPC_MSG_
76 * to index needs to be setup. 74 * to index needs to be setup.
77 */ 75 */
78 76
79 BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0); 77 BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
80 BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1); 78 BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
81 BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2); 79 BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2);
82 BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3); 80 BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
83 81
84 for (i = 0; i < MSG_COUNT; i++) { 82 for (i = 0; i < MSG_COUNT; i++) {
85 result = ps3_event_receive_port_setup(cpu, &virqs[i]); 83 result = ps3_event_receive_port_setup(cpu, &virqs[i]);
86 84
87 if (result) 85 if (result)
88 continue; 86 continue;
89 87
90 DBG("%s:%d: (%d, %d) => virq %u\n", 88 DBG("%s:%d: (%d, %d) => virq %u\n",
91 __func__, __LINE__, cpu, i, virqs[i]); 89 __func__, __LINE__, cpu, i, virqs[i]);
92 90
93 result = smp_request_message_ipi(virqs[i], i); 91 result = smp_request_message_ipi(virqs[i], i);
94 92
95 if (result) 93 if (result)
96 virqs[i] = NO_IRQ; 94 virqs[i] = NO_IRQ;
97 } 95 else
96 ps3_register_ipi_irq(cpu, virqs[i]);
97 }
98 98
99 ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]); 99 ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
100 100
101 DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu); 101 DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
102 }
103
104 return 2;
102} 105}
103 106
104void ps3_smp_cleanup_cpu(int cpu) 107void ps3_smp_cleanup_cpu(int cpu)
@@ -121,7 +124,6 @@ static struct smp_ops_t ps3_smp_ops = {
121 .probe = ps3_smp_probe, 124 .probe = ps3_smp_probe,
122 .message_pass = ps3_smp_message_pass, 125 .message_pass = ps3_smp_message_pass,
123 .kick_cpu = smp_generic_kick_cpu, 126 .kick_cpu = smp_generic_kick_cpu,
124 .setup_cpu = ps3_smp_setup_cpu,
125}; 127};
126 128
127void smp_init_ps3(void) 129void smp_init_ps3(void)
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index af1a5df46b3e..b6731e4a6646 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -280,6 +280,7 @@ void __init ehv_pic_init(void)
280 280
281 if (!ehv_pic->irqhost) { 281 if (!ehv_pic->irqhost) {
282 of_node_put(np); 282 of_node_put(np);
283 kfree(ehv_pic);
283 return; 284 return;
284 } 285 }
285 286
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index c4d96fa32ba5..d5c3c90ee698 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -328,6 +328,7 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
328err: 328err:
329 iounmap(fsl_lbc_ctrl_dev->regs); 329 iounmap(fsl_lbc_ctrl_dev->regs);
330 kfree(fsl_lbc_ctrl_dev); 330 kfree(fsl_lbc_ctrl_dev);
331 fsl_lbc_ctrl_dev = NULL;
331 return ret; 332 return ret;
332} 333}
333 334
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index de170fd5ba4e..22ffccd8bef5 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -23,7 +23,7 @@
23 */ 23 */
24 24
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/export.h> 26#include <linux/module.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0842c6f8a3e6..8c7e8528e7c4 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -800,8 +800,6 @@ static void mpic_end_ipi(struct irq_data *d)
800 * IPIs are marked IRQ_PER_CPU. This has the side effect of 800 * IPIs are marked IRQ_PER_CPU. This has the side effect of
801 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from 801 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
802 * applying to them. We EOI them late to avoid re-entering. 802 * applying to them. We EOI them late to avoid re-entering.
803 * We mark IPI's with IRQF_DISABLED as they must run with
804 * irqs disabled.
805 */ 803 */
806 mpic_eoi(mpic); 804 mpic_eoi(mpic);
807} 805}
diff --git a/arch/powerpc/sysdev/ppc4xx_soc.c b/arch/powerpc/sysdev/ppc4xx_soc.c
index d3d6ce3c33b4..0debcc31ad70 100644
--- a/arch/powerpc/sysdev/ppc4xx_soc.c
+++ b/arch/powerpc/sysdev/ppc4xx_soc.c
@@ -115,7 +115,7 @@ static int __init ppc4xx_l2c_probe(void)
115 } 115 }
116 116
117 /* Install error handler */ 117 /* Install error handler */
118 if (request_irq(irq, l2c_error_handler, IRQF_DISABLED, "L2C", 0) < 0) { 118 if (request_irq(irq, l2c_error_handler, 0, "L2C", 0) < 0) {
119 printk(KERN_ERR "Cannot install L2C error handler" 119 printk(KERN_ERR "Cannot install L2C error handler"
120 ", cache is not enabled\n"); 120 ", cache is not enabled\n");
121 of_node_put(np); 121 of_node_put(np);
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 3363fbc964f8..ceb09cbd2329 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -216,7 +216,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
216 /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says 216 /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
217 that the BRG divisor must be even if you're not using divide-by-16 217 that the BRG divisor must be even if you're not using divide-by-16
218 mode. */ 218 mode. */
219 if (!div16 && (divisor & 1)) 219 if (!div16 && (divisor & 1) && (divisor > 3))
220 divisor++; 220 divisor++;
221 221
222 tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | 222 tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
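
The qe.c hunk above tightens the QE_General4 errata workaround: an odd BRG divisor is only bumped to even when not in divide-by-16 mode, and small divisors (3 or less) are now left alone. A standalone sketch of just that rounding rule:

#include <stdio.h>

static unsigned int round_divisor(unsigned int divisor, int div16)
{
	if (!div16 && (divisor & 1) && (divisor > 3))
		divisor++;
	return divisor;
}

int main(void)
{
	/* expected output: 3 6 5 */
	printf("%u %u %u\n", round_divisor(3, 0), round_divisor(5, 0),
	       round_divisor(5, 1));
	return 0;
}
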
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 3d93a8ded0f8..63762c672a03 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -134,11 +134,10 @@ static void xics_request_ipi(void)
134 BUG_ON(ipi == NO_IRQ); 134 BUG_ON(ipi == NO_IRQ);
135 135
136 /* 136 /*
137 * IPIs are marked IRQF_DISABLED as they must run with irqs 137 * IPIs are marked IRQF_PERCPU. The handler was set in map.
138 * disabled, and PERCPU. The handler was set in map.
139 */ 138 */
140 BUG_ON(request_irq(ipi, icp_ops->ipi_action, 139 BUG_ON(request_irq(ipi, icp_ops->ipi_action,
141 IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL)); 140 IRQF_PERCPU, "IPI", NULL));
142} 141}
143 142
144int __init xics_smp_probe(void) 143int __init xics_smp_probe(void)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a9fbd43395f7..373679b3744a 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -572,6 +572,7 @@ config KEXEC
572config CRASH_DUMP 572config CRASH_DUMP
573 bool "kernel crash dumps" 573 bool "kernel crash dumps"
574 depends on 64BIT 574 depends on 64BIT
575 select KEXEC
575 help 576 help
576 Generate crash dump after being started by kexec. 577 Generate crash dump after being started by kexec.
577 Crash dump kernels are loaded in the main kernel with kexec-tools 578 Crash dump kernels are loaded in the main kernel with kexec-tools
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 49676771bd66..ffd1ac255f19 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -368,9 +368,12 @@ static inline int crypt_s390_func_available(int func,
368 368
369 if (facility_mask & CRYPT_S390_MSA && !test_facility(17)) 369 if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
370 return 0; 370 return 0;
371 if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76)) 371
372 if (facility_mask & CRYPT_S390_MSA3 &&
373 (!test_facility(2) || !test_facility(76)))
372 return 0; 374 return 0;
373 if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77)) 375 if (facility_mask & CRYPT_S390_MSA4 &&
376 (!test_facility(2) || !test_facility(77)))
374 return 0; 377 return 0;
375 378
376 switch (func & CRYPT_S390_OP_MASK) { 379 switch (func & CRYPT_S390_OP_MASK) {
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 24e18473d926..b0c235cb6ad5 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -47,7 +47,7 @@ struct sca_block {
47#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) 47#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
48#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) 48#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
49 49
50#define CPUSTAT_HOST 0x80000000 50#define CPUSTAT_STOPPED 0x80000000
51#define CPUSTAT_WAIT 0x10000000 51#define CPUSTAT_WAIT 0x10000000
52#define CPUSTAT_ECALL_PEND 0x08000000 52#define CPUSTAT_ECALL_PEND 0x08000000
53#define CPUSTAT_STOP_INT 0x04000000 53#define CPUSTAT_STOP_INT 0x04000000
@@ -139,6 +139,7 @@ struct kvm_vcpu_stat {
139 u32 instruction_stfl; 139 u32 instruction_stfl;
140 u32 instruction_tprot; 140 u32 instruction_tprot;
141 u32 instruction_sigp_sense; 141 u32 instruction_sigp_sense;
142 u32 instruction_sigp_sense_running;
142 u32 instruction_sigp_external_call; 143 u32 instruction_sigp_external_call;
143 u32 instruction_sigp_emergency; 144 u32 instruction_sigp_emergency;
144 u32 instruction_sigp_stop; 145 u32 instruction_sigp_stop;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 34ede0ea85a9..4f289ff0b7fe 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -593,14 +593,16 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
593 unsigned long address, bits; 593 unsigned long address, bits;
594 unsigned char skey; 594 unsigned char skey;
595 595
596 if (!pte_present(*ptep))
597 return pgste;
596 address = pte_val(*ptep) & PAGE_MASK; 598 address = pte_val(*ptep) & PAGE_MASK;
597 skey = page_get_storage_key(address); 599 skey = page_get_storage_key(address);
598 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); 600 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
599 /* Clear page changed & referenced bit in the storage key */ 601 /* Clear page changed & referenced bit in the storage key */
600 if (bits) { 602 if (bits & _PAGE_CHANGED)
601 skey ^= bits; 603 page_set_storage_key(address, skey ^ bits, 1);
602 page_set_storage_key(address, skey, 1); 604 else if (bits)
603 } 605 page_reset_referenced(address);
604 /* Transfer page changed & referenced bit to guest bits in pgste */ 606 /* Transfer page changed & referenced bit to guest bits in pgste */
605 pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */ 607 pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */
606 /* Get host changed & referenced bits from pgste */ 608 /* Get host changed & referenced bits from pgste */
@@ -625,6 +627,8 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
625#ifdef CONFIG_PGSTE 627#ifdef CONFIG_PGSTE
626 int young; 628 int young;
627 629
630 if (!pte_present(*ptep))
631 return pgste;
628 young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); 632 young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
629 /* Transfer page referenced bit to pte software bit (host view) */ 633 /* Transfer page referenced bit to pte software bit (host view) */
630 if (young || (pgste_val(pgste) & RCP_HR_BIT)) 634 if (young || (pgste_val(pgste) & RCP_HR_BIT))
@@ -638,13 +642,15 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
638 642
639} 643}
640 644
641static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste) 645static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
642{ 646{
643#ifdef CONFIG_PGSTE 647#ifdef CONFIG_PGSTE
644 unsigned long address; 648 unsigned long address;
645 unsigned long okey, nkey; 649 unsigned long okey, nkey;
646 650
647 address = pte_val(*ptep) & PAGE_MASK; 651 if (!pte_present(entry))
652 return;
653 address = pte_val(entry) & PAGE_MASK;
648 okey = nkey = page_get_storage_key(address); 654 okey = nkey = page_get_storage_key(address);
649 nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT); 655 nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
650 /* Set page access key and fetch protection bit from pgste */ 656 /* Set page access key and fetch protection bit from pgste */
@@ -712,7 +718,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
712 718
713 if (mm_has_pgste(mm)) { 719 if (mm_has_pgste(mm)) {
714 pgste = pgste_get_lock(ptep); 720 pgste = pgste_get_lock(ptep);
715 pgste_set_pte(ptep, pgste); 721 pgste_set_pte(ptep, pgste, entry);
716 *ptep = entry; 722 *ptep = entry;
717 pgste_set_unlock(ptep, pgste); 723 pgste_set_unlock(ptep, pgste);
718 } else 724 } else
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 5a099714df04..097183c70407 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -82,6 +82,7 @@ extern unsigned int user_mode;
82#define MACHINE_FLAG_LPAR (1UL << 12) 82#define MACHINE_FLAG_LPAR (1UL << 12)
83#define MACHINE_FLAG_SPP (1UL << 13) 83#define MACHINE_FLAG_SPP (1UL << 13)
84#define MACHINE_FLAG_TOPOLOGY (1UL << 14) 84#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
85#define MACHINE_FLAG_STCKF (1UL << 15)
85 86
86#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 87#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
87#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) 88#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -100,6 +101,7 @@ extern unsigned int user_mode;
100#define MACHINE_HAS_PFMF (0) 101#define MACHINE_HAS_PFMF (0)
101#define MACHINE_HAS_SPP (0) 102#define MACHINE_HAS_SPP (0)
102#define MACHINE_HAS_TOPOLOGY (0) 103#define MACHINE_HAS_TOPOLOGY (0)
104#define MACHINE_HAS_STCKF (0)
103#else /* __s390x__ */ 105#else /* __s390x__ */
104#define MACHINE_HAS_IEEE (1) 106#define MACHINE_HAS_IEEE (1)
105#define MACHINE_HAS_CSP (1) 107#define MACHINE_HAS_CSP (1)
@@ -111,6 +113,7 @@ extern unsigned int user_mode;
111#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF) 113#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
112#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) 114#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
113#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) 115#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
116#define MACHINE_HAS_STCKF (S390_lowcore.machine_flags & MACHINE_FLAG_STCKF)
114#endif /* __s390x__ */ 117#endif /* __s390x__ */
115 118
116#define ZFCPDUMP_HSA_SIZE (32UL<<20) 119#define ZFCPDUMP_HSA_SIZE (32UL<<20)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index d610bef9c5e9..c447a27a7fdb 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -90,7 +90,7 @@ static inline unsigned long long get_clock_fast(void)
90{ 90{
91 unsigned long long clk; 91 unsigned long long clk;
92 92
93 if (test_facility(25)) 93 if (MACHINE_HAS_STCKF)
94 asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc"); 94 asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
95 else 95 else
96 clk = get_clock(); 96 clk = get_clock();
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 404bdb9671b4..58de4c91c333 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -277,7 +277,9 @@
277#define __NR_clock_adjtime 337 277#define __NR_clock_adjtime 337
278#define __NR_syncfs 338 278#define __NR_syncfs 338
279#define __NR_setns 339 279#define __NR_setns 339
280#define NR_syscalls 340 280#define __NR_process_vm_readv 340
281#define __NR_process_vm_writev 341
282#define NR_syscalls 342
281 283
282/* 284/*
283 * There are some system calls that are not present on 64 bit, some 285 * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 5006a1d9f5d0..18c51df9fe06 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1627,3 +1627,23 @@ ENTRY(sys_setns_wrapper)
1627 lgfr %r2,%r2 # int 1627 lgfr %r2,%r2 # int
1628 lgfr %r3,%r3 # int 1628 lgfr %r3,%r3 # int
1629 jg sys_setns 1629 jg sys_setns
1630
1631ENTRY(compat_sys_process_vm_readv_wrapper)
1632 lgfr %r2,%r2 # compat_pid_t
1633 llgtr %r3,%r3 # struct compat_iovec __user *
1634 llgfr %r4,%r4 # unsigned long
1635 llgtr %r5,%r5 # struct compat_iovec __user *
1636 llgfr %r6,%r6 # unsigned long
1637 llgf %r0,164(%r15) # unsigned long
1638 stg %r0,160(%r15)
1639 jg sys_process_vm_readv
1640
1641ENTRY(compat_sys_process_vm_writev_wrapper)
1642 lgfr %r2,%r2 # compat_pid_t
1643 llgtr %r3,%r3 # struct compat_iovec __user *
1644 llgfr %r4,%r4 # unsigned long
1645 llgtr %r5,%r5 # struct compat_iovec __user *
1646 llgfr %r6,%r6 # unsigned long
1647 llgf %r0,164(%r15) # unsigned long
1648 stg %r0,160(%r15)
1649 jg sys_process_vm_writev
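
The compat wrappers added above widen each 32-bit user argument before tail-calling the 64-bit syscall: lgfr sign-extends signed values, while llgfr/llgtr zero-extend unsigned lengths and 31-bit user pointers. A small C sketch of the two widenings (the values are illustrative; only the extension behaviour is the point):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t  pid = -1;
	uint32_t len = 0xffffffffu;

	long long wide_pid = (long long)pid;			/* sign-extend */
	unsigned long long wide_len = (unsigned long long)len;	/* zero-extend */

	printf("pid=%lld len=%llu\n", wide_pid, wide_len);
	return 0;
}
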
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 37394b3413e2..c9ffe0025197 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -390,6 +390,8 @@ static __init void detect_machine_facilities(void)
390 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; 390 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
391 if (test_facility(40)) 391 if (test_facility(40))
392 S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; 392 S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
393 if (test_facility(25))
394 S390_lowcore.machine_flags |= MACHINE_FLAG_STCKF;
393#endif 395#endif
394} 396}
395 397
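
The early.c hunk above sets MACHINE_FLAG_STCKF once at boot so that get_clock_fast() (see the timex.h hunk earlier in this diff) can test a cached flag instead of re-running the facility probe on every call. A hedged sketch of caching a feature probe behind a flag; probe_facility() is an illustrative stand-in for the real facility test:

#include <stdio.h>

static int probe_facility(int nr)
{
	return nr == 25;	/* pretend facility 25 is installed */
}

static unsigned long machine_flags;
#define FLAG_STCKF (1UL << 15)

static void detect_features(void)
{
	if (probe_facility(25))
		machine_flags |= FLAG_STCKF;	/* probe once, cache the result */
}

int main(void)
{
	detect_features();
	printf("fast clock path: %s\n",
	       (machine_flags & FLAG_STCKF) ? "yes" : "no");
	return 0;
}
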
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 450931a45b68..573bc29551ef 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -296,13 +296,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
296 ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)))) 296 ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
297 /* Invalid psw mask. */ 297 /* Invalid psw mask. */
298 return -EINVAL; 298 return -EINVAL;
299 if (addr == (addr_t) &dummy->regs.psw.addr)
300 /*
301 * The debugger changed the instruction address,
302 * reset system call restart, see signal.c:do_signal
303 */
304 task_thread_info(child)->system_call = 0;
305
306 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; 299 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
307 300
308 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { 301 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -614,11 +607,6 @@ static int __poke_user_compat(struct task_struct *child,
614 /* Transfer 31 bit amode bit to psw mask. */ 607 /* Transfer 31 bit amode bit to psw mask. */
615 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | 608 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
616 (__u64)(tmp & PSW32_ADDR_AMODE); 609 (__u64)(tmp & PSW32_ADDR_AMODE);
617 /*
618 * The debugger changed the instruction address,
619 * reset system call restart, see signal.c:do_signal
620 */
621 task_thread_info(child)->system_call = 0;
622 } else { 610 } else {
623 /* gpr 0-15 */ 611 /* gpr 0-15 */
624 *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; 612 *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
@@ -905,6 +893,14 @@ static int s390_last_break_get(struct task_struct *target,
905 return 0; 893 return 0;
906} 894}
907 895
896static int s390_last_break_set(struct task_struct *target,
897 const struct user_regset *regset,
898 unsigned int pos, unsigned int count,
899 const void *kbuf, const void __user *ubuf)
900{
901 return 0;
902}
903
908#endif 904#endif
909 905
910static int s390_system_call_get(struct task_struct *target, 906static int s390_system_call_get(struct task_struct *target,
@@ -951,6 +947,7 @@ static const struct user_regset s390_regsets[] = {
951 .size = sizeof(long), 947 .size = sizeof(long),
952 .align = sizeof(long), 948 .align = sizeof(long),
953 .get = s390_last_break_get, 949 .get = s390_last_break_get,
950 .set = s390_last_break_set,
954 }, 951 },
955#endif 952#endif
956 [REGSET_SYSTEM_CALL] = { 953 [REGSET_SYSTEM_CALL] = {
@@ -1116,6 +1113,14 @@ static int s390_compat_last_break_get(struct task_struct *target,
1116 return 0; 1113 return 0;
1117} 1114}
1118 1115
1116static int s390_compat_last_break_set(struct task_struct *target,
1117 const struct user_regset *regset,
1118 unsigned int pos, unsigned int count,
1119 const void *kbuf, const void __user *ubuf)
1120{
1121 return 0;
1122}
1123
1119static const struct user_regset s390_compat_regsets[] = { 1124static const struct user_regset s390_compat_regsets[] = {
1120 [REGSET_GENERAL] = { 1125 [REGSET_GENERAL] = {
1121 .core_note_type = NT_PRSTATUS, 1126 .core_note_type = NT_PRSTATUS,
@@ -1139,6 +1144,7 @@ static const struct user_regset s390_compat_regsets[] = {
1139 .size = sizeof(long), 1144 .size = sizeof(long),
1140 .align = sizeof(long), 1145 .align = sizeof(long),
1141 .get = s390_compat_last_break_get, 1146 .get = s390_compat_last_break_get,
1147 .set = s390_compat_last_break_set,
1142 }, 1148 },
1143 [REGSET_SYSTEM_CALL] = { 1149 [REGSET_SYSTEM_CALL] = {
1144 .core_note_type = NT_S390_SYSTEM_CALL, 1150 .core_note_type = NT_S390_SYSTEM_CALL,
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 8ac6bfa2786c..e54c4ff8abaa 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -211,6 +211,8 @@ static void __init setup_zfcpdump(unsigned int console_devno)
211 211
212 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 212 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
213 return; 213 return;
214 if (OLDMEM_BASE)
215 return;
214 if (console_devno != -1) 216 if (console_devno != -1)
215 sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x", 217 sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
216 ipl_info.data.fcp.dev_id.devno, console_devno); 218 ipl_info.data.fcp.dev_id.devno, console_devno);
@@ -482,7 +484,7 @@ static void __init setup_memory_end(void)
482 484
483 485
484#ifdef CONFIG_ZFCPDUMP 486#ifdef CONFIG_ZFCPDUMP
485 if (ipl_info.type == IPL_TYPE_FCP_DUMP) { 487 if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) {
486 memory_end = ZFCPDUMP_HSA_SIZE; 488 memory_end = ZFCPDUMP_HSA_SIZE;
487 memory_end_set = 1; 489 memory_end_set = 1;
488 } 490 }
@@ -577,7 +579,7 @@ static unsigned long __init find_crash_base(unsigned long crash_size,
577 *msg = "first memory chunk must be at least crashkernel size"; 579 *msg = "first memory chunk must be at least crashkernel size";
578 return 0; 580 return 0;
579 } 581 }
580 if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE)) 582 if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
581 return OLDMEM_BASE; 583 return OLDMEM_BASE;
582 584
583 for (i = MEMORY_CHUNKS - 1; i >= 0; i--) { 585 for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 05a85bc14c98..7f6f9f354545 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -460,9 +460,9 @@ void do_signal(struct pt_regs *regs)
460 regs->svc_code >> 16); 460 regs->svc_code >> 16);
461 break; 461 break;
462 } 462 }
463 /* No longer in a system call */
464 clear_thread_flag(TIF_SYSCALL);
465 } 463 }
464 /* No longer in a system call */
465 clear_thread_flag(TIF_SYSCALL);
466 466
467 if ((is_compat_task() ? 467 if ((is_compat_task() ?
468 handle_signal32(signr, &ka, &info, oldset, regs) : 468 handle_signal32(signr, &ka, &info, oldset, regs) :
@@ -486,6 +486,7 @@ void do_signal(struct pt_regs *regs)
486 } 486 }
487 487
488 /* No handlers present - check for system call restart */ 488 /* No handlers present - check for system call restart */
489 clear_thread_flag(TIF_SYSCALL);
489 if (current_thread_info()->system_call) { 490 if (current_thread_info()->system_call) {
490 regs->svc_code = current_thread_info()->system_call; 491 regs->svc_code = current_thread_info()->system_call;
491 switch (regs->gprs[2]) { 492 switch (regs->gprs[2]) {
@@ -500,9 +501,6 @@ void do_signal(struct pt_regs *regs)
500 regs->gprs[2] = regs->orig_gpr2; 501 regs->gprs[2] = regs->orig_gpr2;
501 set_thread_flag(TIF_SYSCALL); 502 set_thread_flag(TIF_SYSCALL);
502 break; 503 break;
503 default:
504 clear_thread_flag(TIF_SYSCALL);
505 break;
506 } 504 }
507 } 505 }
508 506
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 73eb08c874fb..bcab2f04ba58 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -348,3 +348,5 @@ SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at
348SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper) 348SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
349SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper) 349SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
350SYSCALL(sys_setns,sys_setns,sys_setns_wrapper) 350SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
351SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */
352SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 77b8942b9a15..fdb5b8cb260f 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -68,8 +68,10 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
68 return mask; 68 return mask;
69} 69}
70 70
71static void add_cpus_to_mask(struct topology_cpu *tl_cpu, 71static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
72 struct mask_info *book, struct mask_info *core) 72 struct mask_info *book,
73 struct mask_info *core,
74 int z10)
73{ 75{
74 unsigned int cpu; 76 unsigned int cpu;
75 77
@@ -88,10 +90,16 @@ static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
88 cpu_book_id[lcpu] = book->id; 90 cpu_book_id[lcpu] = book->id;
89#endif 91#endif
90 cpumask_set_cpu(lcpu, &core->mask); 92 cpumask_set_cpu(lcpu, &core->mask);
91 cpu_core_id[lcpu] = core->id; 93 if (z10) {
94 cpu_core_id[lcpu] = rcpu;
95 core = core->next;
96 } else {
97 cpu_core_id[lcpu] = core->id;
98 }
92 smp_cpu_polarization[lcpu] = tl_cpu->pp; 99 smp_cpu_polarization[lcpu] = tl_cpu->pp;
93 } 100 }
94 } 101 }
102 return core;
95} 103}
96 104
97static void clear_masks(void) 105static void clear_masks(void)
@@ -123,18 +131,41 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
123{ 131{
124#ifdef CONFIG_SCHED_BOOK 132#ifdef CONFIG_SCHED_BOOK
125 struct mask_info *book = &book_info; 133 struct mask_info *book = &book_info;
134 struct cpuid cpu_id;
126#else 135#else
127 struct mask_info *book = NULL; 136 struct mask_info *book = NULL;
128#endif 137#endif
129 struct mask_info *core = &core_info; 138 struct mask_info *core = &core_info;
130 union topology_entry *tle, *end; 139 union topology_entry *tle, *end;
140 int z10 = 0;
131 141
132 142#ifdef CONFIG_SCHED_BOOK
143 get_cpu_id(&cpu_id);
144 z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
145#endif
133 spin_lock_irq(&topology_lock); 146 spin_lock_irq(&topology_lock);
134 clear_masks(); 147 clear_masks();
135 tle = info->tle; 148 tle = info->tle;
136 end = (union topology_entry *)((unsigned long)info + info->length); 149 end = (union topology_entry *)((unsigned long)info + info->length);
137 while (tle < end) { 150 while (tle < end) {
151#ifdef CONFIG_SCHED_BOOK
152 if (z10) {
153 switch (tle->nl) {
154 case 1:
155 book = book->next;
156 book->id = tle->container.id;
157 break;
158 case 0:
159 core = add_cpus_to_mask(&tle->cpu, book, core, z10);
160 break;
161 default:
162 clear_masks();
163 goto out;
164 }
165 tle = next_tle(tle);
166 continue;
167 }
168#endif
138 switch (tle->nl) { 169 switch (tle->nl) {
139#ifdef CONFIG_SCHED_BOOK 170#ifdef CONFIG_SCHED_BOOK
140 case 2: 171 case 2:
@@ -147,7 +178,7 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
147 core->id = tle->container.id; 178 core->id = tle->container.id;
148 break; 179 break;
149 case 0: 180 case 0:
150 add_cpus_to_mask(&tle->cpu, book, core); 181 add_cpus_to_mask(&tle->cpu, book, core, z10);
151 break; 182 break;
152 default: 183 default:
153 clear_masks(); 184 clear_masks();
@@ -328,8 +359,8 @@ void __init s390_init_cpu_topology(void)
328 for (i = 0; i < TOPOLOGY_NR_MAG; i++) 359 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
329 printk(" %d", info->mag[i]); 360 printk(" %d", info->mag[i]);
330 printk(" / %d\n", info->mnest); 361 printk(" / %d\n", info->mnest);
331 alloc_masks(info, &core_info, 2); 362 alloc_masks(info, &core_info, 1);
332#ifdef CONFIG_SCHED_BOOK 363#ifdef CONFIG_SCHED_BOOK
333 alloc_masks(info, &book_info, 3); 364 alloc_masks(info, &book_info, 2);
334#endif 365#endif
335} 366}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 56fe6bc81fee..e4c79ebb40e6 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -43,6 +43,8 @@ SECTIONS
43 43
44 NOTES :text :note 44 NOTES :text :note
45 45
46 .dummy : { *(.dummy) } :data
47
46 RODATA 48 RODATA
47 49
48#ifdef CONFIG_SHARED_KERNEL 50#ifdef CONFIG_SHARED_KERNEL
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 87cedd61be04..8943e82cd4d9 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -70,7 +70,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
70 return -EOPNOTSUPP; 70 return -EOPNOTSUPP;
71 } 71 }
72 72
73 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 73 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
74 vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM; 74 vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
75 vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; 75 vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
76 vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; 76 vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index c7c51898984e..02434543eabb 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -132,7 +132,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
132 int rc = 0; 132 int rc = 0;
133 133
134 vcpu->stat.exit_stop_request++; 134 vcpu->stat.exit_stop_request++;
135 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
136 spin_lock_bh(&vcpu->arch.local_int.lock); 135 spin_lock_bh(&vcpu->arch.local_int.lock);
137 if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) { 136 if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
138 vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP; 137 vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
@@ -149,6 +148,8 @@ static int handle_stop(struct kvm_vcpu *vcpu)
149 } 148 }
150 149
151 if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { 150 if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
151 atomic_set_mask(CPUSTAT_STOPPED,
152 &vcpu->arch.sie_block->cpuflags);
152 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; 153 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
153 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); 154 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
154 rc = -EOPNOTSUPP; 155 rc = -EOPNOTSUPP;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 87c16705b381..278ee009ce65 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -252,6 +252,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
252 offsetof(struct _lowcore, restart_psw), sizeof(psw_t)); 252 offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
253 if (rc == -EFAULT) 253 if (rc == -EFAULT)
254 exception = 1; 254 exception = 1;
255 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
255 break; 256 break;
256 257
257 case KVM_S390_PROGRAM_INT: 258 case KVM_S390_PROGRAM_INT:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0bd3bea1e4cd..d1c445732451 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -65,6 +65,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) }, 65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
66 { "instruction_tprot", VCPU_STAT(instruction_tprot) }, 66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) }, 67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
68 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) }, 69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
69 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) }, 70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
70 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, 71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
@@ -127,6 +128,7 @@ int kvm_dev_ioctl_check_extension(long ext)
127 switch (ext) { 128 switch (ext) {
128 case KVM_CAP_S390_PSW: 129 case KVM_CAP_S390_PSW:
129 case KVM_CAP_S390_GMAP: 130 case KVM_CAP_S390_GMAP:
131 case KVM_CAP_SYNC_MMU:
130 r = 1; 132 r = 1;
131 break; 133 break;
132 default: 134 default:
@@ -270,10 +272,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
270 restore_fp_regs(&vcpu->arch.guest_fpregs); 272 restore_fp_regs(&vcpu->arch.guest_fpregs);
271 restore_access_regs(vcpu->arch.guest_acrs); 273 restore_access_regs(vcpu->arch.guest_acrs);
272 gmap_enable(vcpu->arch.gmap); 274 gmap_enable(vcpu->arch.gmap);
275 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
273} 276}
274 277
275void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 278void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
276{ 279{
280 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
277 gmap_disable(vcpu->arch.gmap); 281 gmap_disable(vcpu->arch.gmap);
278 save_fp_regs(&vcpu->arch.guest_fpregs); 282 save_fp_regs(&vcpu->arch.guest_fpregs);
279 save_access_regs(vcpu->arch.guest_acrs); 283 save_access_regs(vcpu->arch.guest_acrs);
@@ -301,7 +305,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
301 305
302int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 306int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
303{ 307{
304 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM); 308 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
309 CPUSTAT_SM |
310 CPUSTAT_STOPPED);
305 vcpu->arch.sie_block->ecb = 6; 311 vcpu->arch.sie_block->ecb = 6;
306 vcpu->arch.sie_block->eca = 0xC1002001U; 312 vcpu->arch.sie_block->eca = 0xC1002001U;
307 vcpu->arch.sie_block->fac = (int) (long) facilities; 313 vcpu->arch.sie_block->fac = (int) (long) facilities;
@@ -428,7 +434,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
428{ 434{
429 int rc = 0; 435 int rc = 0;
430 436
431 if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING) 437 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
432 rc = -EBUSY; 438 rc = -EBUSY;
433 else { 439 else {
434 vcpu->run->psw_mask = psw.mask; 440 vcpu->run->psw_mask = psw.mask;
@@ -501,7 +507,7 @@ rerun_vcpu:
501 if (vcpu->sigset_active) 507 if (vcpu->sigset_active)
502 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 508 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
503 509
504 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 510 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
505 511
506 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL); 512 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
507 513
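A note on the cpuflags rework spread across diag.c, intercept.c, interrupt.c and kvm-s390.c above: CPUSTAT_RUNNING now only mirrors whether the vcpu is loaded on a host cpu (set in kvm_arch_vcpu_load(), cleared in kvm_arch_vcpu_put()), while the architectural stopped state moves to the new CPUSTAT_STOPPED bit. A minimal sketch of the resulting convention, using only fields that appear in the hunks (the ex_ helper name is invented, not part of the patch):

    /* invented helper, for illustration only */
    static inline int ex_vcpu_is_stopped(struct kvm_vcpu *vcpu)
    {
            return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
    }

This is exactly the test that kvm_arch_vcpu_ioctl_set_initial_psw() and the sigp handlers now perform instead of checking for the absence of CPUSTAT_RUNNING.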
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 391626361084..d02638959922 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -336,6 +336,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
336 u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0; 336 u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0;
337 u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0; 337 u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0;
338 struct vm_area_struct *vma; 338 struct vm_area_struct *vma;
339 unsigned long user_address;
339 340
340 vcpu->stat.instruction_tprot++; 341 vcpu->stat.instruction_tprot++;
341 342
@@ -349,9 +350,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
349 return -EOPNOTSUPP; 350 return -EOPNOTSUPP;
350 351
351 352
353 /* we must resolve the address without holding the mmap semaphore.
354 * This is ok since the userspace hypervisor is not supposed to change
355 * the mapping while the guest queries the memory. Otherwise the guest
356 * might crash or get wrong info anyway. */
357 user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);
358
352 down_read(&current->mm->mmap_sem); 359 down_read(&current->mm->mmap_sem);
353 vma = find_vma(current->mm, 360 vma = find_vma(current->mm, user_address);
354 (unsigned long) __guestaddr_to_user(vcpu, address1));
355 if (!vma) { 361 if (!vma) {
356 up_read(&current->mm->mmap_sem); 362 up_read(&current->mm->mmap_sem);
357 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 363 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index f815118835f3..0a7941d74bc6 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -31,9 +31,11 @@
31#define SIGP_SET_PREFIX 0x0d 31#define SIGP_SET_PREFIX 0x0d
32#define SIGP_STORE_STATUS_ADDR 0x0e 32#define SIGP_STORE_STATUS_ADDR 0x0e
33#define SIGP_SET_ARCH 0x12 33#define SIGP_SET_ARCH 0x12
34#define SIGP_SENSE_RUNNING 0x15
34 35
35/* cpu status bits */ 36/* cpu status bits */
36#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL 37#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
38#define SIGP_STAT_NOT_RUNNING 0x00000400UL
37#define SIGP_STAT_INCORRECT_STATE 0x00000200UL 39#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
38#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL 40#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
39#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL 41#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
@@ -57,8 +59,8 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
57 spin_lock(&fi->lock); 59 spin_lock(&fi->lock);
58 if (fi->local_int[cpu_addr] == NULL) 60 if (fi->local_int[cpu_addr] == NULL)
59 rc = 3; /* not operational */ 61 rc = 3; /* not operational */
60 else if (atomic_read(fi->local_int[cpu_addr]->cpuflags) 62 else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
61 & CPUSTAT_RUNNING) { 63 & CPUSTAT_STOPPED)) {
62 *reg &= 0xffffffff00000000UL; 64 *reg &= 0xffffffff00000000UL;
63 rc = 1; /* status stored */ 65 rc = 1; /* status stored */
64 } else { 66 } else {
@@ -251,7 +253,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
251 253
252 spin_lock_bh(&li->lock); 254 spin_lock_bh(&li->lock);
253 /* cpu must be in stopped state */ 255 /* cpu must be in stopped state */
254 if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) { 256 if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
255 rc = 1; /* incorrect state */ 257 rc = 1; /* incorrect state */
256 *reg &= SIGP_STAT_INCORRECT_STATE; 258 *reg &= SIGP_STAT_INCORRECT_STATE;
257 kfree(inti); 259 kfree(inti);
@@ -275,6 +277,38 @@ out_fi:
275 return rc; 277 return rc;
276} 278}
277 279
280static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
281 unsigned long *reg)
282{
283 int rc;
284 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
285
286 if (cpu_addr >= KVM_MAX_VCPUS)
287 return 3; /* not operational */
288
289 spin_lock(&fi->lock);
290 if (fi->local_int[cpu_addr] == NULL)
291 rc = 3; /* not operational */
292 else {
293 if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
294 & CPUSTAT_RUNNING) {
295 /* running */
296 rc = 1;
297 } else {
298 /* not running */
299 *reg &= 0xffffffff00000000UL;
300 *reg |= SIGP_STAT_NOT_RUNNING;
301 rc = 0;
302 }
303 }
304 spin_unlock(&fi->lock);
305
306 VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
307 rc);
308
309 return rc;
310}
311
278int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) 312int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
279{ 313{
280 int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 314 int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
@@ -331,6 +365,11 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
331 rc = __sigp_set_prefix(vcpu, cpu_addr, parameter, 365 rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
332 &vcpu->arch.guest_gprs[r1]); 366 &vcpu->arch.guest_gprs[r1]);
333 break; 367 break;
368 case SIGP_SENSE_RUNNING:
369 vcpu->stat.instruction_sigp_sense_running++;
370 rc = __sigp_sense_running(vcpu, cpu_addr,
371 &vcpu->arch.guest_gprs[r1]);
372 break;
334 case SIGP_RESTART: 373 case SIGP_RESTART:
335 vcpu->stat.instruction_sigp_restart++; 374 vcpu->stat.instruction_sigp_restart++;
336 /* user space must know about restart */ 375 /* user space must know about restart */
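To summarize the return convention of the new __sigp_sense_running() above: rc 3 means the addressed cpu is not operational, rc 1 means it is currently running, and rc 0 stores SIGP_STAT_NOT_RUNNING into the addressed register for a cpu that is not running. A small invented decoder, for illustration only (not part of the patch):

    static const char *ex_sense_running_str(int rc, unsigned long reg)
    {
            if (rc == 3)
                    return "not operational";
            if (rc == 1)
                    return "running";
            return (reg & SIGP_STAT_NOT_RUNNING) ? "not running" : "unknown";
    }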
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 1766def5bc3f..a9a301866b3c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -587,8 +587,13 @@ static void pfault_interrupt(unsigned int ext_int_code,
587 } else { 587 } else {
588 /* Completion interrupt was faster than initial 588 /* Completion interrupt was faster than initial
589 * interrupt. Set pfault_wait to -1 so the initial 589 * interrupt. Set pfault_wait to -1 so the initial
590 * interrupt doesn't put the task to sleep. */ 590 * interrupt doesn't put the task to sleep.
591 tsk->thread.pfault_wait = -1; 591 * If the task is not running, ignore the completion
592 * interrupt since it must be a leftover of a PFAULT
593 * CANCEL operation which didn't remove all pending
594 * completion interrupts. */
595 if (tsk->state == TASK_RUNNING)
596 tsk->thread.pfault_wait = -1;
592 } 597 }
593 put_task_struct(tsk); 598 put_task_struct(tsk);
594 } else { 599 } else {
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 0dca9a5c6be6..15d970328f71 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -151,8 +151,13 @@ typedef struct page *pgtable_t;
151#endif /* !__ASSEMBLY__ */ 151#endif /* !__ASSEMBLY__ */
152 152
153#ifdef CONFIG_UNCACHED_MAPPING 153#ifdef CONFIG_UNCACHED_MAPPING
154#if defined(CONFIG_29BIT)
155#define UNCAC_ADDR(addr) P2SEGADDR(addr)
156#define CAC_ADDR(addr) P1SEGADDR(addr)
157#else
154#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start) 158#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start)
155#define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET) 159#define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET)
160#endif
156#else 161#else
157#define UNCAC_ADDR(addr) ((addr)) 162#define UNCAC_ADDR(addr) ((addr))
158#define CAC_ADDR(addr) ((addr)) 163#define CAC_ADDR(addr) ((addr))
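For background on the new 29-bit branch: in legacy SH 29-bit physical mode the kernel lives in fixed address segments, with P1 as the cached identity window and P2 as its uncached alias, so P2SEGADDR()/P1SEGADDR() simply move an address between the two windows rather than offsetting against the runtime uncached_start used in 32-bit mode. A rough, self-contained illustration; the segment bases below are the classic SH-3/SH-4 values and, like the ex_ names, are an assumption of this sketch rather than something taken from the patch:

    #define EX_P1SEG 0x80000000UL  /* cached identity window (assumed base) */
    #define EX_P2SEG 0xa0000000UL  /* uncached alias of the same memory (assumed base) */

    /* translate a cached P1 address to its uncached P2 alias */
    static inline unsigned long ex_uncac_addr(unsigned long addr)
    {
            return (addr & 0x1fffffffUL) | EX_P2SEG;
    }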
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index 3432008d2888..152b8627a184 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -375,8 +375,10 @@
375#define __NR_syncfs 362 375#define __NR_syncfs 362
376#define __NR_sendmmsg 363 376#define __NR_sendmmsg 363
377#define __NR_setns 364 377#define __NR_setns 364
378#define __NR_process_vm_readv 365
379#define __NR_process_vm_writev 366
378 380
379#define NR_syscalls 365 381#define NR_syscalls 367
380 382
381#ifdef __KERNEL__ 383#ifdef __KERNEL__
382 384
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index ec9898665f23..c330c23db5a0 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -396,10 +396,12 @@
396#define __NR_syncfs 373 396#define __NR_syncfs 373
397#define __NR_sendmmsg 374 397#define __NR_sendmmsg 374
398#define __NR_setns 375 398#define __NR_setns 375
399#define __NR_process_vm_readv 376
400#define __NR_process_vm_writev 377
399 401
400#ifdef __KERNEL__ 402#ifdef __KERNEL__
401 403
402#define NR_syscalls 376 404#define NR_syscalls 378
403 405
404#define __ARCH_WANT_IPC_PARSE_VERSION 406#define __ARCH_WANT_IPC_PARSE_VERSION
405#define __ARCH_WANT_OLD_READDIR 407#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
index a43124e608c3..0bd744f9a3b7 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -176,10 +176,12 @@ static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
176static struct plat_sci_port scif0_platform_data = { 176static struct plat_sci_port scif0_platform_data = {
177 .mapbase = 0xfffe8000, 177 .mapbase = 0xfffe8000,
178 .flags = UPF_BOOT_AUTOCONF, 178 .flags = UPF_BOOT_AUTOCONF,
179 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 179 .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
180 SCSCR_REIE,
180 .scbrr_algo_id = SCBRR_ALGO_2, 181 .scbrr_algo_id = SCBRR_ALGO_2,
181 .type = PORT_SCIF, 182 .type = PORT_SCIF,
182 .irqs = { 192, 192, 192, 192 }, 183 .irqs = { 192, 192, 192, 192 },
184 .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
183}; 185};
184 186
185static struct platform_device scif0_device = { 187static struct platform_device scif0_device = {
@@ -193,10 +195,12 @@ static struct platform_device scif0_device = {
193static struct plat_sci_port scif1_platform_data = { 195static struct plat_sci_port scif1_platform_data = {
194 .mapbase = 0xfffe8800, 196 .mapbase = 0xfffe8800,
195 .flags = UPF_BOOT_AUTOCONF, 197 .flags = UPF_BOOT_AUTOCONF,
196 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 198 .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
199 SCSCR_REIE,
197 .scbrr_algo_id = SCBRR_ALGO_2, 200 .scbrr_algo_id = SCBRR_ALGO_2,
198 .type = PORT_SCIF, 201 .type = PORT_SCIF,
199 .irqs = { 196, 196, 196, 196 }, 202 .irqs = { 196, 196, 196, 196 },
203 .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
200}; 204};
201 205
202static struct platform_device scif1_device = { 206static struct platform_device scif1_device = {
@@ -210,10 +214,12 @@ static struct platform_device scif1_device = {
210static struct plat_sci_port scif2_platform_data = { 214static struct plat_sci_port scif2_platform_data = {
211 .mapbase = 0xfffe9000, 215 .mapbase = 0xfffe9000,
212 .flags = UPF_BOOT_AUTOCONF, 216 .flags = UPF_BOOT_AUTOCONF,
213 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 217 .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
218 SCSCR_REIE,
214 .scbrr_algo_id = SCBRR_ALGO_2, 219 .scbrr_algo_id = SCBRR_ALGO_2,
215 .type = PORT_SCIF, 220 .type = PORT_SCIF,
216 .irqs = { 200, 200, 200, 200 }, 221 .irqs = { 200, 200, 200, 200 },
222 .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
217}; 223};
218 224
219static struct platform_device scif2_device = { 225static struct platform_device scif2_device = {
@@ -227,10 +233,12 @@ static struct platform_device scif2_device = {
227static struct plat_sci_port scif3_platform_data = { 233static struct plat_sci_port scif3_platform_data = {
228 .mapbase = 0xfffe9800, 234 .mapbase = 0xfffe9800,
229 .flags = UPF_BOOT_AUTOCONF, 235 .flags = UPF_BOOT_AUTOCONF,
230 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 236 .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
237 SCSCR_REIE,
231 .scbrr_algo_id = SCBRR_ALGO_2, 238 .scbrr_algo_id = SCBRR_ALGO_2,
232 .type = PORT_SCIF, 239 .type = PORT_SCIF,
233 .irqs = { 204, 204, 204, 204 }, 240 .irqs = { 204, 204, 204, 204 },
241 .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
234}; 242};
235 243
236static struct platform_device scif3_device = { 244static struct platform_device scif3_device = {
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 293e39c59c00..ee56a9b1a981 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -382,3 +382,5 @@ ENTRY(sys_call_table)
382 .long sys_syncfs 382 .long sys_syncfs
383 .long sys_sendmmsg 383 .long sys_sendmmsg
384 .long sys_setns 384 .long sys_setns
385 .long sys_process_vm_readv /* 365 */
386 .long sys_process_vm_writev
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index ceb34b94afa9..9af7de26fb71 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -402,3 +402,5 @@ sys_call_table:
402 .long sys_syncfs 402 .long sys_syncfs
403 .long sys_sendmmsg 403 .long sys_sendmmsg
404 .long sys_setns /* 375 */ 404 .long sys_setns /* 375 */
405 .long sys_process_vm_readv
406 .long sys_process_vm_writev
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 5b31a8e89823..a790cc657476 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -431,10 +431,6 @@ extern unsigned long *sparc_valid_addr_bitmap;
431#define kern_addr_valid(addr) \ 431#define kern_addr_valid(addr) \
432 (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap)) 432 (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
433 433
434extern int io_remap_pfn_range(struct vm_area_struct *vma,
435 unsigned long from, unsigned long pfn,
436 unsigned long size, pgprot_t prot);
437
438/* 434/*
439 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in 435 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
440 * its high 4 bits. These macros/functions put it there or get it from there. 436 * its high 4 bits. These macros/functions put it there or get it from there.
@@ -443,6 +439,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
443#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4)) 439#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
444#define GET_PFN(pfn) (pfn & 0x0fffffffUL) 440#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
445 441
442extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
443 unsigned long, pgprot_t);
444
445static inline int io_remap_pfn_range(struct vm_area_struct *vma,
446 unsigned long from, unsigned long pfn,
447 unsigned long size, pgprot_t prot)
448{
449 unsigned long long offset, space, phys_base;
450
451 offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
452 space = GET_IOSPACE(pfn);
453 phys_base = offset | (space << 32ULL);
454
455 return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
456}
457
446#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 458#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
447#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ 459#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
448({ \ 460({ \
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index adf89329af59..38ebb2c60137 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -757,10 +757,6 @@ static inline bool kern_addr_valid(unsigned long addr)
757 757
758extern int page_in_phys_avail(unsigned long paddr); 758extern int page_in_phys_avail(unsigned long paddr);
759 759
760extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
761 unsigned long pfn,
762 unsigned long size, pgprot_t prot);
763
764/* 760/*
765 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in 761 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
766 * its high 4 bits. These macros/functions put it there or get it from there. 762 * its high 4 bits. These macros/functions put it there or get it from there.
@@ -769,6 +765,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
769#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4)) 765#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
770#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL) 766#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
771 767
768extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
769 unsigned long, pgprot_t);
770
771static inline int io_remap_pfn_range(struct vm_area_struct *vma,
772 unsigned long from, unsigned long pfn,
773 unsigned long size, pgprot_t prot)
774{
775 unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
776 int space = GET_IOSPACE(pfn);
777 unsigned long phys_base;
778
779 phys_base = offset | (((unsigned long) space) << 32UL);
780
781 return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
782}
783
772#include <asm-generic/pgtable.h> 784#include <asm-generic/pgtable.h>
773 785
774/* We provide our own get_unmapped_area to cope with VA holes and 786/* We provide our own get_unmapped_area to cope with VA holes and
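Both pgtable headers now define io_remap_pfn_range() as a static inline wrapper around remap_pfn_range(), which is why the out-of-line versions in arch/sparc/mm/generic_32.c and generic_64.c are removed further down. A short sketch of the encoding the wrapper undoes, built only from the macros visible above (the ex_ names are invented for the example):

    /* pack a 4-bit iospace into the top of a pfn; this is the inverse of
     * the GET_IOSPACE()/GET_PFN() split done by the inline wrapper above */
    static unsigned long ex_pack_io_pfn(unsigned long ex_paddr, unsigned long ex_iospace)
    {
            return (ex_paddr >> PAGE_SHIFT) |
                   (ex_iospace << (BITS_PER_LONG - 4));
    }

The wrapper then recovers offset = GET_PFN(pfn) << PAGE_SHIFT and space = GET_IOSPACE(pfn), recombines them into phys_base, and hands phys_base >> PAGE_SHIFT to remap_pfn_range().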
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 6260d5deeabc..c7cb0af0eb59 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -406,8 +406,10 @@
406#define __NR_syncfs 335 406#define __NR_syncfs 335
407#define __NR_sendmmsg 336 407#define __NR_sendmmsg 336
408#define __NR_setns 337 408#define __NR_setns 337
409#define __NR_process_vm_readv 338
410#define __NR_process_vm_writev 339
409 411
410#define NR_syscalls 338 412#define NR_syscalls 340
411 413
412#ifdef __32bit_syscall_numbers__ 414#ifdef __32bit_syscall_numbers__
413/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, 415/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 7429b47c3aca..381edcd5bc29 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -1181,13 +1181,11 @@ static int __devinit ds_probe(struct vio_dev *vdev,
1181 1181
1182 dp->rcv_buf_len = 4096; 1182 dp->rcv_buf_len = 4096;
1183 1183
1184 dp->ds_states = kzalloc(sizeof(ds_states_template), 1184 dp->ds_states = kmemdup(ds_states_template,
1185 GFP_KERNEL); 1185 sizeof(ds_states_template), GFP_KERNEL);
1186 if (!dp->ds_states) 1186 if (!dp->ds_states)
1187 goto out_free_rcv_buf; 1187 goto out_free_rcv_buf;
1188 1188
1189 memcpy(dp->ds_states, ds_states_template,
1190 sizeof(ds_states_template));
1191 dp->num_ds_states = ARRAY_SIZE(ds_states_template); 1189 dp->num_ds_states = ARRAY_SIZE(ds_states_template);
1192 1190
1193 for (i = 0; i < dp->num_ds_states; i++) 1191 for (i = 0; i < dp->num_ds_states; i++)
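This hunk, like the prom_common.c one further down, is a mechanical conversion of an allocate-then-copy pair into a single kmemdup() call; the same size and GFP flags are passed, and kzalloc()'s zeroing was redundant because the buffer is fully overwritten anyway. Shown as plain before/after for readability:

    /* before */
    dp->ds_states = kzalloc(sizeof(ds_states_template), GFP_KERNEL);
    if (!dp->ds_states)
            goto out_free_rcv_buf;
    memcpy(dp->ds_states, ds_states_template, sizeof(ds_states_template));

    /* after */
    dp->ds_states = kmemdup(ds_states_template,
                            sizeof(ds_states_template), GFP_KERNEL);
    if (!dp->ds_states)
            goto out_free_rcv_buf;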
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index e27f8ea8656e..0c218e4c0881 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -42,6 +42,9 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
42extern void fpload(unsigned long *fpregs, unsigned long *fsr); 42extern void fpload(unsigned long *fpregs, unsigned long *fsr);
43 43
44#else /* CONFIG_SPARC32 */ 44#else /* CONFIG_SPARC32 */
45
46#include <asm/trap_block.h>
47
45struct popc_3insn_patch_entry { 48struct popc_3insn_patch_entry {
46 unsigned int addr; 49 unsigned int addr;
47 unsigned int insns[3]; 50 unsigned int insns[3];
@@ -57,6 +60,10 @@ extern struct popc_6insn_patch_entry __popc_6insn_patch,
57 __popc_6insn_patch_end; 60 __popc_6insn_patch_end;
58 61
59extern void __init per_cpu_patch(void); 62extern void __init per_cpu_patch(void);
63extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
64 struct sun4v_1insn_patch_entry *);
65extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
66 struct sun4v_2insn_patch_entry *);
60extern void __init sun4v_patch(void); 67extern void __init sun4v_patch(void);
61extern void __init boot_cpu_id_too_large(int cpu); 68extern void __init boot_cpu_id_too_large(int cpu);
62extern unsigned int dcache_parity_tl1_occurred; 69extern unsigned int dcache_parity_tl1_occurred;
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index da0c6c70ccb2..e5519870c3d9 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -17,6 +17,8 @@
17#include <asm/processor.h> 17#include <asm/processor.h>
18#include <asm/spitfire.h> 18#include <asm/spitfire.h>
19 19
20#include "entry.h"
21
20#ifdef CONFIG_SPARC64 22#ifdef CONFIG_SPARC64
21 23
22#include <linux/jump_label.h> 24#include <linux/jump_label.h>
@@ -203,6 +205,29 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
203} 205}
204 206
205#ifdef CONFIG_SPARC64 207#ifdef CONFIG_SPARC64
208static void do_patch_sections(const Elf_Ehdr *hdr,
209 const Elf_Shdr *sechdrs)
210{
211 const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL;
212 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
213
214 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
215 if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name))
216 sun4v_1insn = s;
217 if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name))
218 sun4v_2insn = s;
219 }
220
221 if (sun4v_1insn && tlb_type == hypervisor) {
222 void *p = (void *) sun4v_1insn->sh_addr;
223 sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size);
224 }
225 if (sun4v_2insn && tlb_type == hypervisor) {
226 void *p = (void *) sun4v_2insn->sh_addr;
227 sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size);
228 }
229}
230
206int module_finalize(const Elf_Ehdr *hdr, 231int module_finalize(const Elf_Ehdr *hdr,
207 const Elf_Shdr *sechdrs, 232 const Elf_Shdr *sechdrs,
208 struct module *me) 233 struct module *me)
@@ -210,6 +235,8 @@ int module_finalize(const Elf_Ehdr *hdr,
210 /* make jump label nops */ 235 /* make jump label nops */
211 jump_label_apply_nops(me); 236 jump_label_apply_nops(me);
212 237
238 do_patch_sections(hdr, sechdrs);
239
213 /* Cheetah's I-cache is fully coherent. */ 240 /* Cheetah's I-cache is fully coherent. */
214 if (tlb_type == spitfire) { 241 if (tlb_type == spitfire) {
215 unsigned long va; 242 unsigned long va;
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 46614807a57f..741df916c124 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -58,12 +58,10 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
58 void *new_val; 58 void *new_val;
59 int err; 59 int err;
60 60
61 new_val = kmalloc(len, GFP_KERNEL); 61 new_val = kmemdup(val, len, GFP_KERNEL);
62 if (!new_val) 62 if (!new_val)
63 return -ENOMEM; 63 return -ENOMEM;
64 64
65 memcpy(new_val, val, len);
66
67 err = -ENODEV; 65 err = -ENODEV;
68 66
69 mutex_lock(&of_set_property_mutex); 67 mutex_lock(&of_set_property_mutex);
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index c965595aa7e9..a854a1c240ff 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -234,40 +234,50 @@ void __init per_cpu_patch(void)
234 } 234 }
235} 235}
236 236
237void __init sun4v_patch(void) 237void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
238 struct sun4v_1insn_patch_entry *end)
238{ 239{
239 extern void sun4v_hvapi_init(void); 240 while (start < end) {
240 struct sun4v_1insn_patch_entry *p1; 241 unsigned long addr = start->addr;
241 struct sun4v_2insn_patch_entry *p2;
242
243 if (tlb_type != hypervisor)
244 return;
245 242
246 p1 = &__sun4v_1insn_patch; 243 *(unsigned int *) (addr + 0) = start->insn;
247 while (p1 < &__sun4v_1insn_patch_end) {
248 unsigned long addr = p1->addr;
249
250 *(unsigned int *) (addr + 0) = p1->insn;
251 wmb(); 244 wmb();
252 __asm__ __volatile__("flush %0" : : "r" (addr + 0)); 245 __asm__ __volatile__("flush %0" : : "r" (addr + 0));
253 246
254 p1++; 247 start++;
255 } 248 }
249}
256 250
257 p2 = &__sun4v_2insn_patch; 251void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
258 while (p2 < &__sun4v_2insn_patch_end) { 252 struct sun4v_2insn_patch_entry *end)
259 unsigned long addr = p2->addr; 253{
254 while (start < end) {
255 unsigned long addr = start->addr;
260 256
261 *(unsigned int *) (addr + 0) = p2->insns[0]; 257 *(unsigned int *) (addr + 0) = start->insns[0];
262 wmb(); 258 wmb();
263 __asm__ __volatile__("flush %0" : : "r" (addr + 0)); 259 __asm__ __volatile__("flush %0" : : "r" (addr + 0));
264 260
265 *(unsigned int *) (addr + 4) = p2->insns[1]; 261 *(unsigned int *) (addr + 4) = start->insns[1];
266 wmb(); 262 wmb();
267 __asm__ __volatile__("flush %0" : : "r" (addr + 4)); 263 __asm__ __volatile__("flush %0" : : "r" (addr + 4));
268 264
269 p2++; 265 start++;
270 } 266 }
267}
268
269void __init sun4v_patch(void)
270{
271 extern void sun4v_hvapi_init(void);
272
273 if (tlb_type != hypervisor)
274 return;
275
276 sun4v_patch_1insn_range(&__sun4v_1insn_patch,
277 &__sun4v_1insn_patch_end);
278
279 sun4v_patch_2insn_range(&__sun4v_2insn_patch,
280 &__sun4v_2insn_patch_end);
271 281
272 sun4v_hvapi_init(); 282 sun4v_hvapi_init();
273} 283}
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 2caa556db86d..023b8860dc97 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -822,21 +822,23 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
822 * want to handle. Thus you cannot kill init even with a SIGKILL even by 822 * want to handle. Thus you cannot kill init even with a SIGKILL even by
823 * mistake. 823 * mistake.
824 */ 824 */
825void do_signal32(sigset_t *oldset, struct pt_regs * regs, 825void do_signal32(sigset_t *oldset, struct pt_regs * regs)
826 int restart_syscall, unsigned long orig_i0)
827{ 826{
828 struct k_sigaction ka; 827 struct k_sigaction ka;
828 unsigned long orig_i0;
829 int restart_syscall;
829 siginfo_t info; 830 siginfo_t info;
830 int signr; 831 int signr;
831 832
832 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 833 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
833 834
834 /* If the debugger messes with the program counter, it clears 835 restart_syscall = 0;
835 * the "in syscall" bit, directing us to not perform a syscall 836 orig_i0 = 0;
836 * restart. 837 if (pt_regs_is_syscall(regs) &&
837 */ 838 (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
838 if (restart_syscall && !pt_regs_is_syscall(regs)) 839 restart_syscall = 1;
839 restart_syscall = 0; 840 orig_i0 = regs->u_regs[UREG_G6];
841 }
840 842
841 if (signr > 0) { 843 if (signr > 0) {
842 if (restart_syscall) 844 if (restart_syscall)
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 8ce247ac04cc..d54c6e53aba0 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -519,10 +519,26 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
519 siginfo_t info; 519 siginfo_t info;
520 int signr; 520 int signr;
521 521
 522	/* It's a lot of work and synchronization to add a new ptrace
 523	 * register for GDB to save and restore in order to get
 524	 * orig_i0 correct for syscall restarts when debugging.
 525	 *
 526	 * Although it should be the case that most of the global
 527	 * registers are volatile across a system call, glibc already
 528	 * depends upon the fact that we preserve them. So we can't
 529	 * just use any global register to save away the orig_i0 value.
 530	 *
 531	 * In particular %g2, %g3, %g4, and %g5 are all assumed to be
 532	 * preserved across a system call trap by various pieces of
 533	 * code in glibc.
 534	 *
 535	 * %g7 is used as the "thread register". %g6 is not used in
 536	 * any fixed manner. %g6 is used as a scratch register and
 537	 * a compiler temporary, but its value is never used across
 538	 * a system call. Therefore %g6 is usable for orig_i0 storage.
 539	 */
522 if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) 540 if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
523 restart_syscall = 1; 541 regs->u_regs[UREG_G6] = orig_i0;
524 else
525 restart_syscall = 0;
526 542
527 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 543 if (test_thread_flag(TIF_RESTORE_SIGMASK))
528 oldset = &current->saved_sigmask; 544 oldset = &current->saved_sigmask;
@@ -535,8 +551,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
535 * the software "in syscall" bit, directing us to not perform 551 * the software "in syscall" bit, directing us to not perform
536 * a syscall restart. 552 * a syscall restart.
537 */ 553 */
538 if (restart_syscall && !pt_regs_is_syscall(regs)) 554 restart_syscall = 0;
539 restart_syscall = 0; 555 if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) {
556 restart_syscall = 1;
557 orig_i0 = regs->u_regs[UREG_G6];
558 }
559
540 560
541 if (signr > 0) { 561 if (signr > 0) {
542 if (restart_syscall) 562 if (restart_syscall)
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index a2b81598d905..f0836cd0e2f2 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -529,11 +529,27 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
529 siginfo_t info; 529 siginfo_t info;
530 int signr; 530 int signr;
531 531
 532	/* It's a lot of work and synchronization to add a new ptrace
 533	 * register for GDB to save and restore in order to get
 534	 * orig_i0 correct for syscall restarts when debugging.
 535	 *
 536	 * Although it should be the case that most of the global
 537	 * registers are volatile across a system call, glibc already
 538	 * depends upon the fact that we preserve them. So we can't
 539	 * just use any global register to save away the orig_i0 value.
 540	 *
 541	 * In particular %g2, %g3, %g4, and %g5 are all assumed to be
 542	 * preserved across a system call trap by various pieces of
 543	 * code in glibc.
 544	 *
 545	 * %g7 is used as the "thread register". %g6 is not used in
 546	 * any fixed manner. %g6 is used as a scratch register and
 547	 * a compiler temporary, but its value is never used across
 548	 * a system call. Therefore %g6 is usable for orig_i0 storage.
 549	 */
532 if (pt_regs_is_syscall(regs) && 550 if (pt_regs_is_syscall(regs) &&
533 (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) { 551 (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
534 restart_syscall = 1; 552 regs->u_regs[UREG_G6] = orig_i0;
535 } else
536 restart_syscall = 0;
537 553
538 if (current_thread_info()->status & TS_RESTORE_SIGMASK) 554 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
539 oldset = &current->saved_sigmask; 555 oldset = &current->saved_sigmask;
@@ -542,22 +558,20 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
542 558
543#ifdef CONFIG_COMPAT 559#ifdef CONFIG_COMPAT
544 if (test_thread_flag(TIF_32BIT)) { 560 if (test_thread_flag(TIF_32BIT)) {
545 extern void do_signal32(sigset_t *, struct pt_regs *, 561 extern void do_signal32(sigset_t *, struct pt_regs *);
546 int restart_syscall, 562 do_signal32(oldset, regs);
547 unsigned long orig_i0);
548 do_signal32(oldset, regs, restart_syscall, orig_i0);
549 return; 563 return;
550 } 564 }
551#endif 565#endif
552 566
553 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 567 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
554 568
555 /* If the debugger messes with the program counter, it clears 569 restart_syscall = 0;
556 * the software "in syscall" bit, directing us to not perform 570 if (pt_regs_is_syscall(regs) &&
557 * a syscall restart. 571 (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
558 */ 572 restart_syscall = 1;
559 if (restart_syscall && !pt_regs_is_syscall(regs)) 573 orig_i0 = regs->u_regs[UREG_G6];
560 restart_syscall = 0; 574 }
561 575
562 if (signr > 0) { 576 if (signr > 0) {
563 if (restart_syscall) 577 if (restart_syscall)
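Condensed view of the restart handling now shared by the sparc signal paths above (a sketch assembled from the hunks; the 64-bit condition is shown, the 32-bit path tests regs->psr & PSR_C instead):

    /* 1. stash the original %i0 before anything can sleep */
    if (pt_regs_is_syscall(regs) &&
        (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
            regs->u_regs[UREG_G6] = orig_i0;

    signr = get_signal_to_deliver(&info, &ka, regs, NULL);

    /* 2. after the (possibly sleeping) signal dequeue, re-derive the
     *    restart decision and recover orig_i0 from %g6 */
    restart_syscall = 0;
    if (pt_regs_is_syscall(regs) &&
        (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
            restart_syscall = 1;
            orig_i0 = regs->u_regs[UREG_G6];
    }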
diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
index e7dc508c38eb..b19570d41a39 100644
--- a/arch/sparc/kernel/sigutil_64.c
+++ b/arch/sparc/kernel/sigutil_64.c
@@ -2,6 +2,7 @@
2#include <linux/types.h> 2#include <linux/types.h>
3#include <linux/thread_info.h> 3#include <linux/thread_info.h>
4#include <linux/uaccess.h> 4#include <linux/uaccess.h>
5#include <linux/errno.h>
5 6
6#include <asm/sigcontext.h> 7#include <asm/sigcontext.h>
7#include <asm/fpumacro.h> 8#include <asm/fpumacro.h>
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 09d8ec454450..63402f9e9f51 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -84,4 +84,4 @@ sys_call_table:
84/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 84/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
85/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 85/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns 87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index edbec45d4688..db86b1a0e9a9 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -85,7 +85,7 @@ sys_call_table32:
85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv 85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init 86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
87/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime 87/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns 88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
89 89
90#endif /* CONFIG_COMPAT */ 90#endif /* CONFIG_COMPAT */
91 91
@@ -162,4 +162,4 @@ sys_call_table:
162/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 162/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
163 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 163 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
164/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 164/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
165 .word sys_syncfs, sys_sendmmsg, sys_setns 165 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index e3cda21b5ee9..301421c11291 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
8obj-y += fault_$(BITS).o 8obj-y += fault_$(BITS).o
9obj-y += init_$(BITS).o 9obj-y += init_$(BITS).o
10obj-$(CONFIG_SPARC32) += loadmmu.o 10obj-$(CONFIG_SPARC32) += loadmmu.o
11obj-y += generic_$(BITS).o
12obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o 11obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o
13obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o 12obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
14obj-$(CONFIG_SPARC_LEON)+= leon_mm.o 13obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
index 5175ac2f4820..8a7f81743c12 100644
--- a/arch/sparc/mm/btfixup.c
+++ b/arch/sparc/mm/btfixup.c
@@ -302,8 +302,7 @@ void __init btfixup(void)
302 case 'i': /* INT */ 302 case 'i': /* INT */
303 if ((insn & 0xc1c00000) == 0x01000000) /* %HI */ 303 if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
304 set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10)); 304 set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
305 else if ((insn & 0x80002000) == 0x80002000 && 305 else if ((insn & 0x80002000) == 0x80002000) /* %LO */
306 (insn & 0x01800000) != 0x01800000) /* %LO */
307 set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff)); 306 set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
308 else { 307 else {
309 prom_printf(insn_i, p, addr, insn); 308 prom_printf(insn_i, p, addr, insn);
diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
deleted file mode 100644
index 6ca39a60a196..000000000000
--- a/arch/sparc/mm/generic_32.c
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * generic.c: Generic Sparc mm routines that are not dependent upon
3 * MMU type but are Sparc specific.
4 *
5 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/kernel.h>
9#include <linux/mm.h>
10#include <linux/swap.h>
11#include <linux/pagemap.h>
12#include <linux/export.h>
13
14#include <asm/pgalloc.h>
15#include <asm/pgtable.h>
16#include <asm/page.h>
17#include <asm/cacheflush.h>
18#include <asm/tlbflush.h>
19
20/* Remap IO memory, the same way as remap_pfn_range(), but use
21 * the obio memory space.
22 *
23 * They use a pgprot that sets PAGE_IO and does not check the
24 * mem_map table as this is independent of normal memory.
25 */
26static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
27 unsigned long offset, pgprot_t prot, int space)
28{
29 unsigned long end;
30
31 address &= ~PMD_MASK;
32 end = address + size;
33 if (end > PMD_SIZE)
34 end = PMD_SIZE;
35 do {
36 set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
37 address += PAGE_SIZE;
38 offset += PAGE_SIZE;
39 pte++;
40 } while (address < end);
41}
42
43static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
44 unsigned long offset, pgprot_t prot, int space)
45{
46 unsigned long end;
47
48 address &= ~PGDIR_MASK;
49 end = address + size;
50 if (end > PGDIR_SIZE)
51 end = PGDIR_SIZE;
52 offset -= address;
53 do {
54 pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
55 if (!pte)
56 return -ENOMEM;
57 io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
58 address = (address + PMD_SIZE) & PMD_MASK;
59 pmd++;
60 } while (address < end);
61 return 0;
62}
63
64int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
65 unsigned long pfn, unsigned long size, pgprot_t prot)
66{
67 int error = 0;
68 pgd_t * dir;
69 unsigned long beg = from;
70 unsigned long end = from + size;
71 struct mm_struct *mm = vma->vm_mm;
72 int space = GET_IOSPACE(pfn);
73 unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
74
75 /* See comment in mm/memory.c remap_pfn_range */
76 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
77 vma->vm_pgoff = (offset >> PAGE_SHIFT) |
78 ((unsigned long)space << 28UL);
79
80 offset -= from;
81 dir = pgd_offset(mm, from);
82 flush_cache_range(vma, beg, end);
83
84 while (from < end) {
85 pmd_t *pmd = pmd_alloc(mm, dir, from);
86 error = -ENOMEM;
87 if (!pmd)
88 break;
89 error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
90 if (error)
91 break;
92 from = (from + PGDIR_SIZE) & PGDIR_MASK;
93 dir++;
94 }
95
96 flush_tlb_range(vma, beg, end);
97 return error;
98}
99EXPORT_SYMBOL(io_remap_pfn_range);
diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
deleted file mode 100644
index 9b357ddae39d..000000000000
--- a/arch/sparc/mm/generic_64.c
+++ /dev/null
@@ -1,165 +0,0 @@
1/*
2 * generic.c: Generic Sparc mm routines that are not dependent upon
3 * MMU type but are Sparc specific.
4 *
5 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#include <linux/kernel.h>
9#include <linux/mm.h>
10#include <linux/swap.h>
11#include <linux/export.h>
12#include <linux/pagemap.h>
13
14#include <asm/pgalloc.h>
15#include <asm/pgtable.h>
16#include <asm/page.h>
17#include <asm/tlbflush.h>
18
19/* Remap IO memory, the same way as remap_pfn_range(), but use
20 * the obio memory space.
21 *
22 * They use a pgprot that sets PAGE_IO and does not check the
23 * mem_map table as this is independent of normal memory.
24 */
25static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
26 unsigned long address,
27 unsigned long size,
28 unsigned long offset, pgprot_t prot,
29 int space)
30{
31 unsigned long end;
32
33 /* clear hack bit that was used as a write_combine side-effect flag */
34 offset &= ~0x1UL;
35 address &= ~PMD_MASK;
36 end = address + size;
37 if (end > PMD_SIZE)
38 end = PMD_SIZE;
39 do {
40 pte_t entry;
41 unsigned long curend = address + PAGE_SIZE;
42
43 entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
44 if (!(address & 0xffff)) {
45 if (PAGE_SIZE < (4 * 1024 * 1024) &&
46 !(address & 0x3fffff) &&
47 !(offset & 0x3ffffe) &&
48 end >= address + 0x400000) {
49 entry = mk_pte_io(offset, prot, space,
50 4 * 1024 * 1024);
51 curend = address + 0x400000;
52 offset += 0x400000;
53 } else if (PAGE_SIZE < (512 * 1024) &&
54 !(address & 0x7ffff) &&
55 !(offset & 0x7fffe) &&
56 end >= address + 0x80000) {
57 entry = mk_pte_io(offset, prot, space,
58 512 * 1024 * 1024);
59 curend = address + 0x80000;
60 offset += 0x80000;
61 } else if (PAGE_SIZE < (64 * 1024) &&
62 !(offset & 0xfffe) &&
63 end >= address + 0x10000) {
64 entry = mk_pte_io(offset, prot, space,
65 64 * 1024);
66 curend = address + 0x10000;
67 offset += 0x10000;
68 } else
69 offset += PAGE_SIZE;
70 } else
71 offset += PAGE_SIZE;
72
73 if (pte_write(entry))
74 entry = pte_mkdirty(entry);
75 do {
76 BUG_ON(!pte_none(*pte));
77 set_pte_at(mm, address, pte, entry);
78 address += PAGE_SIZE;
79 pte_val(entry) += PAGE_SIZE;
80 pte++;
81 } while (address < curend);
82 } while (address < end);
83}
84
85static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
86 unsigned long offset, pgprot_t prot, int space)
87{
88 unsigned long end;
89
90 address &= ~PGDIR_MASK;
91 end = address + size;
92 if (end > PGDIR_SIZE)
93 end = PGDIR_SIZE;
94 offset -= address;
95 do {
96 pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
97 if (!pte)
98 return -ENOMEM;
99 io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
100 pte_unmap(pte);
101 address = (address + PMD_SIZE) & PMD_MASK;
102 pmd++;
103 } while (address < end);
104 return 0;
105}
106
107static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
108 unsigned long offset, pgprot_t prot, int space)
109{
110 unsigned long end;
111
112 address &= ~PUD_MASK;
113 end = address + size;
114 if (end > PUD_SIZE)
115 end = PUD_SIZE;
116 offset -= address;
117 do {
118 pmd_t *pmd = pmd_alloc(mm, pud, address);
119 if (!pud)
120 return -ENOMEM;
121 io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
122 address = (address + PUD_SIZE) & PUD_MASK;
123 pud++;
124 } while (address < end);
125 return 0;
126}
127
128int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
129 unsigned long pfn, unsigned long size, pgprot_t prot)
130{
131 int error = 0;
132 pgd_t * dir;
133 unsigned long beg = from;
134 unsigned long end = from + size;
135 struct mm_struct *mm = vma->vm_mm;
136 int space = GET_IOSPACE(pfn);
137 unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
138 unsigned long phys_base;
139
140 phys_base = offset | (((unsigned long) space) << 32UL);
141
142 /* See comment in mm/memory.c remap_pfn_range */
143 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
144 vma->vm_pgoff = phys_base >> PAGE_SHIFT;
145
146 offset -= from;
147 dir = pgd_offset(mm, from);
148 flush_cache_range(vma, beg, end);
149
150 while (from < end) {
151 pud_t *pud = pud_alloc(mm, dir, from);
152 error = -ENOMEM;
153 if (!pud)
154 break;
155 error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
156 if (error)
157 break;
158 from = (from + PGDIR_SIZE) & PGDIR_MASK;
159 dir++;
160 }
161
162 flush_tlb_range(vma, beg, end);
163 return error;
164}
165EXPORT_SYMBOL(io_remap_pfn_range);
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index 94e9a511de84..f80f8ceabc67 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -74,16 +74,6 @@ enum {
74 */ 74 */
75void tile_irq_activate(unsigned int irq, int tile_irq_type); 75void tile_irq_activate(unsigned int irq, int tile_irq_type);
76 76
77/*
78 * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know
79 * how to use enable/disable_percpu_irq() to manage interrupts on each
80 * core. We can't use the generic enable/disable_irq() because they
81 * use a single reference count per irq, rather than per cpu per irq.
82 */
83void enable_percpu_irq(unsigned int irq);
84void disable_percpu_irq(unsigned int irq);
85
86
87void setup_irq_regs(void); 77void setup_irq_regs(void);
88 78
89#endif /* _ASM_TILE_IRQ_H */ 79#endif /* _ASM_TILE_IRQ_H */
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index aa0134db2dd6..02e628065012 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -152,14 +152,13 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
152 * Remove an irq from the disabled mask. If we're in an interrupt 152 * Remove an irq from the disabled mask. If we're in an interrupt
153 * context, defer enabling the HW interrupt until we leave. 153 * context, defer enabling the HW interrupt until we leave.
154 */ 154 */
155void enable_percpu_irq(unsigned int irq) 155static void tile_irq_chip_enable(struct irq_data *d)
156{ 156{
157 get_cpu_var(irq_disable_mask) &= ~(1UL << irq); 157 get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
158 if (__get_cpu_var(irq_depth) == 0) 158 if (__get_cpu_var(irq_depth) == 0)
159 unmask_irqs(1UL << irq); 159 unmask_irqs(1UL << d->irq);
160 put_cpu_var(irq_disable_mask); 160 put_cpu_var(irq_disable_mask);
161} 161}
162EXPORT_SYMBOL(enable_percpu_irq);
163 162
164/* 163/*
165 * Add an irq to the disabled mask. We disable the HW interrupt 164 * Add an irq to the disabled mask. We disable the HW interrupt
@@ -167,13 +166,12 @@ EXPORT_SYMBOL(enable_percpu_irq);
167 * in an interrupt context, the return path is careful to avoid 166 * in an interrupt context, the return path is careful to avoid
168 * unmasking a newly disabled interrupt. 167 * unmasking a newly disabled interrupt.
169 */ 168 */
170void disable_percpu_irq(unsigned int irq) 169static void tile_irq_chip_disable(struct irq_data *d)
171{ 170{
172 get_cpu_var(irq_disable_mask) |= (1UL << irq); 171 get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
173 mask_irqs(1UL << irq); 172 mask_irqs(1UL << d->irq);
174 put_cpu_var(irq_disable_mask); 173 put_cpu_var(irq_disable_mask);
175} 174}
176EXPORT_SYMBOL(disable_percpu_irq);
177 175
178/* Mask an interrupt. */ 176/* Mask an interrupt. */
179static void tile_irq_chip_mask(struct irq_data *d) 177static void tile_irq_chip_mask(struct irq_data *d)
@@ -209,6 +207,8 @@ static void tile_irq_chip_eoi(struct irq_data *d)
209 207
210static struct irq_chip tile_irq_chip = { 208static struct irq_chip tile_irq_chip = {
211 .name = "tile_irq_chip", 209 .name = "tile_irq_chip",
210 .irq_enable = tile_irq_chip_enable,
211 .irq_disable = tile_irq_chip_disable,
212 .irq_ack = tile_irq_chip_ack, 212 .irq_ack = tile_irq_chip_ack,
213 .irq_eoi = tile_irq_chip_eoi, 213 .irq_eoi = tile_irq_chip_eoi,
214 .irq_mask = tile_irq_chip_mask, 214 .irq_mask = tile_irq_chip_mask,
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 658f2ce426a4..b3ed19f8779c 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -15,6 +15,7 @@
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/dma-mapping.h> 16#include <linux/dma-mapping.h>
17#include <linux/vmalloc.h> 17#include <linux/vmalloc.h>
18#include <linux/export.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
19#include <asm/homecache.h> 20#include <asm/homecache.h>
20 21
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 2a8014cb1ff5..9d610d3fb11e 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -24,6 +24,7 @@
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/uaccess.h> 26#include <linux/uaccess.h>
27#include <linux/export.h>
27 28
28#include <asm/processor.h> 29#include <asm/processor.h>
29#include <asm/sections.h> 30#include <asm/sections.h>
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index b671a86f4515..602908268093 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -18,6 +18,7 @@
18#include <linux/cpu.h> 18#include <linux/cpu.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/smp.h> 20#include <linux/smp.h>
21#include <linux/stat.h>
21#include <hv/hypervisor.h> 22#include <hv/hypervisor.h>
22 23
23/* Return a string queried from the hypervisor, truncated to page size. */ 24/* Return a string queried from the hypervisor, truncated to page size. */
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index a87d2a859ba9..2a81d32de0da 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -39,6 +39,9 @@ EXPORT_SYMBOL(finv_user_asm);
39EXPORT_SYMBOL(current_text_addr); 39EXPORT_SYMBOL(current_text_addr);
40EXPORT_SYMBOL(dump_stack); 40EXPORT_SYMBOL(dump_stack);
41 41
42/* arch/tile/kernel/head.S */
43EXPORT_SYMBOL(empty_zero_page);
44
42/* arch/tile/lib/, various memcpy files */ 45/* arch/tile/lib/, various memcpy files */
43EXPORT_SYMBOL(memcpy); 46EXPORT_SYMBOL(memcpy);
44EXPORT_SYMBOL(__copy_to_user_inatomic); 47EXPORT_SYMBOL(__copy_to_user_inatomic);
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index cbe6f4f9eca3..1cc6ae477c98 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -449,9 +449,12 @@ void homecache_free_pages(unsigned long addr, unsigned int order)
449 VM_BUG_ON(!virt_addr_valid((void *)addr)); 449 VM_BUG_ON(!virt_addr_valid((void *)addr));
450 page = virt_to_page((void *)addr); 450 page = virt_to_page((void *)addr);
451 if (put_page_testzero(page)) { 451 if (put_page_testzero(page)) {
452 int pages = (1 << order);
453 homecache_change_page_home(page, order, initial_page_home()); 452 homecache_change_page_home(page, order, initial_page_home());
454 while (pages--) 453 if (order == 0) {
455 __free_page(page++); 454 free_hot_cold_page(page, 0);
455 } else {
456 init_page_count(page);
457 __free_pages(page, order);
458 }
456 } 459 }
457} 460}
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index e57dcce9bfda..942ed6174f1d 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -237,13 +237,13 @@ menu "PKUnity NetBook-0916 Features"
237 237
238config I2C_BATTERY_BQ27200 238config I2C_BATTERY_BQ27200
239 tristate "I2C Battery BQ27200 Support" 239 tristate "I2C Battery BQ27200 Support"
240 select PUV3_I2C 240 select I2C_PUV3
241 select POWER_SUPPLY 241 select POWER_SUPPLY
242 select BATTERY_BQ27x00 242 select BATTERY_BQ27x00
243 243
244config I2C_EEPROM_AT24 244config I2C_EEPROM_AT24
245 tristate "I2C EEPROMs AT24 support" 245 tristate "I2C EEPROMs AT24 support"
246 select PUV3_I2C 246 select I2C_PUV3
247 select MISC_DEVICES 247 select MISC_DEVICES
248 select EEPROM_AT24 248 select EEPROM_AT24
249 249
diff --git a/arch/unicore32/Kconfig.debug b/arch/unicore32/Kconfig.debug
index ae2ec334c3c6..1a3626239843 100644
--- a/arch/unicore32/Kconfig.debug
+++ b/arch/unicore32/Kconfig.debug
@@ -44,18 +44,4 @@ config DEBUG_OCD
44 Say Y here if you want the debug print routines to direct their 44 Say Y here if you want the debug print routines to direct their
45 output to the UniCore On-Chip-Debugger channel using CP #1. 45 output to the UniCore On-Chip-Debugger channel using CP #1.
46 46
47config DEBUG_OCD_BREAKPOINT
48 bool "Breakpoint support via On-Chip-Debugger"
49 depends on DEBUG_OCD
50
51config DEBUG_UART
52 int "Kernel low-level debugging messages via serial port"
53 depends on DEBUG_LL
54 range 0 1
55 default "0"
56 help
57 Choice for UART for kernel low-level using PKUnity UARTS,
58 should be between zero and one. The port must have been
59 initialised by the boot-loader before use.
60
61endmenu 47endmenu
diff --git a/arch/unicore32/boot/compressed/Makefile b/arch/unicore32/boot/compressed/Makefile
index b0954a2d23cf..950a9afa38f8 100644
--- a/arch/unicore32/boot/compressed/Makefile
+++ b/arch/unicore32/boot/compressed/Makefile
@@ -10,8 +10,8 @@
10# Copyright (C) 2001~2010 GUAN Xue-tao 10# Copyright (C) 2001~2010 GUAN Xue-tao
11# 11#
12 12
13EXTRA_CFLAGS := -fpic -fno-builtin 13ccflags-y := -fpic -fno-builtin
14EXTRA_AFLAGS := -Wa,-march=all 14asflags-y := -Wa,-march=all
15 15
16OBJS := misc.o 16OBJS := misc.o
17 17
diff --git a/arch/unicore32/include/asm/bitops.h b/arch/unicore32/include/asm/bitops.h
index 1628a6328994..401f597bc38c 100644
--- a/arch/unicore32/include/asm/bitops.h
+++ b/arch/unicore32/include/asm/bitops.h
@@ -13,12 +13,6 @@
13#ifndef __UNICORE_BITOPS_H__ 13#ifndef __UNICORE_BITOPS_H__
14#define __UNICORE_BITOPS_H__ 14#define __UNICORE_BITOPS_H__
15 15
16#define find_next_bit __uc32_find_next_bit
17#define find_next_zero_bit __uc32_find_next_zero_bit
18
19#define find_first_bit __uc32_find_first_bit
20#define find_first_zero_bit __uc32_find_first_zero_bit
21
22#define _ASM_GENERIC_BITOPS_FLS_H_ 16#define _ASM_GENERIC_BITOPS_FLS_H_
23#define _ASM_GENERIC_BITOPS___FLS_H_ 17#define _ASM_GENERIC_BITOPS___FLS_H_
24#define _ASM_GENERIC_BITOPS_FFS_H_ 18#define _ASM_GENERIC_BITOPS_FFS_H_
@@ -44,4 +38,10 @@ static inline int fls(int x)
44 38
45#include <asm-generic/bitops.h> 39#include <asm-generic/bitops.h>
46 40
41/* following definitions: to avoid using codes in lib/find_*.c */
42#define find_next_bit find_next_bit
43#define find_next_zero_bit find_next_zero_bit
44#define find_first_bit find_first_bit
45#define find_first_zero_bit find_first_zero_bit
46
47#endif /* __UNICORE_BITOPS_H__ */ 47#endif /* __UNICORE_BITOPS_H__ */
diff --git a/arch/unicore32/include/asm/processor.h b/arch/unicore32/include/asm/processor.h
index e11cb0786578..f0d780a51f9b 100644
--- a/arch/unicore32/include/asm/processor.h
+++ b/arch/unicore32/include/asm/processor.h
@@ -53,7 +53,6 @@ struct thread_struct {
53#define start_thread(regs, pc, sp) \ 53#define start_thread(regs, pc, sp) \
54({ \ 54({ \
55 unsigned long *stack = (unsigned long *)sp; \ 55 unsigned long *stack = (unsigned long *)sp; \
56 set_fs(USER_DS); \
57 memset(regs->uregs, 0, sizeof(regs->uregs)); \ 56 memset(regs->uregs, 0, sizeof(regs->uregs)); \
58 regs->UCreg_asr = USER_MODE; \ 57 regs->UCreg_asr = USER_MODE; \
59 regs->UCreg_pc = pc & ~1; /* pc */ \ 58 regs->UCreg_pc = pc & ~1; /* pc */ \
diff --git a/arch/unicore32/kernel/ksyms.c b/arch/unicore32/kernel/ksyms.c
index a8970809428a..d98bd812cae1 100644
--- a/arch/unicore32/kernel/ksyms.c
+++ b/arch/unicore32/kernel/ksyms.c
@@ -24,8 +24,8 @@
24 24
25#include "ksyms.h" 25#include "ksyms.h"
26 26
27EXPORT_SYMBOL(__uc32_find_next_zero_bit); 27EXPORT_SYMBOL(find_next_zero_bit);
28EXPORT_SYMBOL(__uc32_find_next_bit); 28EXPORT_SYMBOL(find_next_bit);
29 29
30EXPORT_SYMBOL(__backtrace); 30EXPORT_SYMBOL(__backtrace);
31 31
diff --git a/arch/unicore32/lib/findbit.S b/arch/unicore32/lib/findbit.S
index c360ce905d8b..c77746247d36 100644
--- a/arch/unicore32/lib/findbit.S
+++ b/arch/unicore32/lib/findbit.S
@@ -17,7 +17,7 @@
17 * Purpose : Find a 'zero' bit 17 * Purpose : Find a 'zero' bit
18 * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit); 18 * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
19 */ 19 */
20__uc32_find_first_zero_bit: 20ENTRY(find_first_zero_bit)
21 cxor.a r1, #0 21 cxor.a r1, #0
22 beq 3f 22 beq 3f
23 mov r2, #0 23 mov r2, #0
@@ -29,13 +29,14 @@ __uc32_find_first_zero_bit:
29 bub 1b 29 bub 1b
303: mov r0, r1 @ no free bits 303: mov r0, r1 @ no free bits
31 mov pc, lr 31 mov pc, lr
32ENDPROC(find_first_zero_bit)
32 33
33/* 34/*
34 * Purpose : Find next 'zero' bit 35 * Purpose : Find next 'zero' bit
35 * Prototype: int find_next_zero_bit 36 * Prototype: int find_next_zero_bit
36 * (void *addr, unsigned int maxbit, int offset) 37 * (void *addr, unsigned int maxbit, int offset)
37 */ 38 */
38ENTRY(__uc32_find_next_zero_bit) 39ENTRY(find_next_zero_bit)
39 cxor.a r1, #0 40 cxor.a r1, #0
40 beq 3b 41 beq 3b
41 and.a ip, r2, #7 42 and.a ip, r2, #7
@@ -47,14 +48,14 @@ ENTRY(__uc32_find_next_zero_bit)
47 or r2, r2, #7 @ if zero, then no bits here 48 or r2, r2, #7 @ if zero, then no bits here
48 add r2, r2, #1 @ align bit pointer 49 add r2, r2, #1 @ align bit pointer
49 b 2b @ loop for next bit 50 b 2b @ loop for next bit
50ENDPROC(__uc32_find_next_zero_bit) 51ENDPROC(find_next_zero_bit)
51 52
52/* 53/*
53 * Purpose : Find a 'one' bit 54 * Purpose : Find a 'one' bit
54 * Prototype: int find_first_bit 55 * Prototype: int find_first_bit
55 * (const unsigned long *addr, unsigned int maxbit); 56 * (const unsigned long *addr, unsigned int maxbit);
56 */ 57 */
57__uc32_find_first_bit: 58ENTRY(find_first_bit)
58 cxor.a r1, #0 59 cxor.a r1, #0
59 beq 3f 60 beq 3f
60 mov r2, #0 61 mov r2, #0
@@ -66,13 +67,14 @@ __uc32_find_first_bit:
66 bub 1b 67 bub 1b
673: mov r0, r1 @ no free bits 683: mov r0, r1 @ no free bits
68 mov pc, lr 69 mov pc, lr
70ENDPROC(find_first_bit)
69 71
70/* 72/*
71 * Purpose : Find next 'one' bit 73 * Purpose : Find next 'one' bit
72 * Prototype: int find_next_zero_bit 74 * Prototype: int find_next_zero_bit
73 * (void *addr, unsigned int maxbit, int offset) 75 * (void *addr, unsigned int maxbit, int offset)
74 */ 76 */
75ENTRY(__uc32_find_next_bit) 77ENTRY(find_next_bit)
76 cxor.a r1, #0 78 cxor.a r1, #0
77 beq 3b 79 beq 3b
78 and.a ip, r2, #7 80 and.a ip, r2, #7
@@ -83,7 +85,7 @@ ENTRY(__uc32_find_next_bit)
83 or r2, r2, #7 @ if zero, then no bits here 85 or r2, r2, #7 @ if zero, then no bits here
84 add r2, r2, #1 @ align bit pointer 86 add r2, r2, #1 @ align bit pointer
85 b 2b @ loop for next bit 87 b 2b @ loop for next bit
86ENDPROC(__uc32_find_next_bit) 88ENDPROC(find_next_bit)
87 89
88/* 90/*
89 * One or more bits in the LSB of r3 are assumed to be set. 91 * One or more bits in the LSB of r3 are assumed to be set.
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cb9a1044a771..efb42949cc09 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -390,7 +390,7 @@ config X86_INTEL_CE
390 This option compiles in support for the CE4100 SOC for settop 390 This option compiles in support for the CE4100 SOC for settop
391 boxes and media devices. 391 boxes and media devices.
392 392
393config X86_INTEL_MID 393config X86_WANT_INTEL_MID
394 bool "Intel MID platform support" 394 bool "Intel MID platform support"
395 depends on X86_32 395 depends on X86_32
396 depends on X86_EXTENDED_PLATFORM 396 depends on X86_EXTENDED_PLATFORM
@@ -399,7 +399,10 @@ config X86_INTEL_MID
399 systems which do not have the PCI legacy interfaces (Moorestown, 399 systems which do not have the PCI legacy interfaces (Moorestown,
400 Medfield). If you are building for a PC class system say N here. 400 Medfield). If you are building for a PC class system say N here.
401 401
402if X86_INTEL_MID 402if X86_WANT_INTEL_MID
403
404config X86_INTEL_MID
405 bool
403 406
404config X86_MRST 407config X86_MRST
405 bool "Moorestown MID platform" 408 bool "Moorestown MID platform"
@@ -411,6 +414,7 @@ config X86_MRST
411 select SPI 414 select SPI
412 select INTEL_SCU_IPC 415 select INTEL_SCU_IPC
413 select X86_PLATFORM_DEVICES 416 select X86_PLATFORM_DEVICES
417 select X86_INTEL_MID
414 ---help--- 418 ---help---
415 Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin 419 Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
416 Internet Device(MID) platform. Moorestown consists of two chips: 420 Internet Device(MID) platform. Moorestown consists of two chips:
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 9b7273cb2193..1a6c09af048f 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -49,6 +49,7 @@ extern unsigned int apic_verbosity;
49extern int local_apic_timer_c2_ok; 49extern int local_apic_timer_c2_ok;
50 50
51extern int disable_apic; 51extern int disable_apic;
52extern unsigned int lapic_timer_frequency;
52 53
53#ifdef CONFIG_SMP 54#ifdef CONFIG_SMP
54extern void __inquire_remote_apic(int apicid); 55extern void __inquire_remote_apic(int apicid);
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
index 4420993acc47..925b605eb5c6 100644
--- a/arch/x86/include/asm/intel_scu_ipc.h
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -3,11 +3,15 @@
3 3
4#include <linux/notifier.h> 4#include <linux/notifier.h>
5 5
6#define IPCMSG_VRTC 0xFA /* Set vRTC device */ 6#define IPCMSG_WARM_RESET 0xF0
7 7#define IPCMSG_COLD_RESET 0xF1
8/* Command id associated with message IPCMSG_VRTC */ 8#define IPCMSG_SOFT_RESET 0xF2
9#define IPC_CMD_VRTC_SETTIME 1 /* Set time */ 9#define IPCMSG_COLD_BOOT 0xF3
10#define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */ 10
11#define IPCMSG_VRTC 0xFA /* Set vRTC device */
12 /* Command id associated with message IPCMSG_VRTC */
13 #define IPC_CMD_VRTC_SETTIME 1 /* Set time */
14 #define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
11 15
12/* Read single register */ 16/* Read single register */
13int intel_scu_ipc_ioread8(u16 addr, u8 *data); 17int intel_scu_ipc_ioread8(u16 addr, u8 *data);
diff --git a/arch/x86/include/asm/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index 72a8b52e7dfd..a01e7ec7d237 100644
--- a/arch/x86/include/asm/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
@@ -17,7 +17,7 @@
17#define NMI_REASON_CLEAR_IOCHK 0x08 17#define NMI_REASON_CLEAR_IOCHK 0x08
18#define NMI_REASON_CLEAR_MASK 0x0f 18#define NMI_REASON_CLEAR_MASK 0x0f
19 19
20static inline unsigned char get_nmi_reason(void) 20static inline unsigned char default_get_nmi_reason(void)
21{ 21{
22 return inb(NMI_REASON_PORT); 22 return inb(NMI_REASON_PORT);
23} 23}
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index c9321f34e55b..0e8ae57d3656 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -201,7 +201,10 @@ int mce_notify_irq(void);
201void mce_notify_process(void); 201void mce_notify_process(void);
202 202
203DECLARE_PER_CPU(struct mce, injectm); 203DECLARE_PER_CPU(struct mce, injectm);
204extern struct file_operations mce_chrdev_ops; 204
205extern void register_mce_write_callback(ssize_t (*)(struct file *filp,
206 const char __user *ubuf,
207 size_t usize, loff_t *off));
205 208
206/* 209/*
207 * Exception handler 210 * Exception handler
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
index 719f00b28ff5..93f79094c224 100644
--- a/arch/x86/include/asm/mrst.h
+++ b/arch/x86/include/asm/mrst.h
@@ -31,11 +31,20 @@ enum mrst_cpu_type {
31}; 31};
32 32
33extern enum mrst_cpu_type __mrst_cpu_chip; 33extern enum mrst_cpu_type __mrst_cpu_chip;
34
35#ifdef CONFIG_X86_INTEL_MID
36
34static inline enum mrst_cpu_type mrst_identify_cpu(void) 37static inline enum mrst_cpu_type mrst_identify_cpu(void)
35{ 38{
36 return __mrst_cpu_chip; 39 return __mrst_cpu_chip;
37} 40}
38 41
42#else /* !CONFIG_X86_INTEL_MID */
43
44#define mrst_identify_cpu() (0)
45
46#endif /* !CONFIG_X86_INTEL_MID */
47
39enum mrst_timer_options { 48enum mrst_timer_options {
40 MRST_TIMER_DEFAULT, 49 MRST_TIMER_DEFAULT,
41 MRST_TIMER_APBT_ONLY, 50 MRST_TIMER_APBT_ONLY,
@@ -44,6 +53,13 @@ enum mrst_timer_options {
44 53
45extern enum mrst_timer_options mrst_timer_options; 54extern enum mrst_timer_options mrst_timer_options;
46 55
56/*
57 * Penwell uses spread spectrum clock, so the freq number is not exactly
58 * the same as reported by MSR based on SDM.
59 */
60#define PENWELL_FSB_FREQ_83SKU 83200
61#define PENWELL_FSB_FREQ_100SKU 99840
62
47#define SFI_MTMR_MAX_NUM 8 63#define SFI_MTMR_MAX_NUM 8
48#define SFI_MRTC_MAX 8 64#define SFI_MRTC_MAX 8
49 65
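
The new stub lets generic code test for a MID SoC without wrapping every call site in #ifdef CONFIG_X86_INTEL_MID: when the option is off, mrst_identify_cpu() folds to 0 and the compiler drops the branch. A minimal sketch of such a call site follows (the function name is illustrative; the rtc.c hunk later in this diff uses the same pattern):

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/mrst.h>

static int __init skip_legacy_rtc(void)
{
	/* mrst_identify_cpu() is a constant 0 on non-MID builds,
	 * so no #ifdef is needed at the call site. */
	if (mrst_identify_cpu())
		return -ENODEV;	/* MID SoCs have no ioport RTC */
	return 0;
}
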
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 084ef95274cd..95203d40ffdd 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -169,7 +169,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
169 return native_write_msr_safe(msr, low, high); 169 return native_write_msr_safe(msr, low, high);
170} 170}
171 171
172/* rdmsr with exception handling */ 172/*
173 * rdmsr with exception handling.
174 *
175 * Please note that the exception handling works only after we've
176 * switched to the "smart" #GP handler in trap_init() which knows about
177 * exception tables - using this macro earlier than that causes machine
178 * hangs on boxes which do not implement the @msr in the first argument.
179 */
173#define rdmsr_safe(msr, p1, p2) \ 180#define rdmsr_safe(msr, p1, p2) \
174({ \ 181({ \
175 int __err; \ 182 int __err; \
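
A short sketch of the guarded-read pattern the new comment describes; the MSR chosen here is just an example (it also appears in the amd.c hunk below):

#include <linux/types.h>
#include <linux/printk.h>
#include <asm/msr.h>

static void report_patch_level(void)
{
	u32 lo, hi;

	/* Only valid after trap_init(): a #GP raised by an
	 * unimplemented MSR is then fixed up via the exception
	 * table and surfaces as a non-zero return value instead
	 * of hanging the machine. */
	if (rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &lo, &hi))
		pr_info("patch-level MSR not implemented on this CPU\n");
}
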
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index c2ff2a1d845e..2d2f01ce6dcb 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -401,6 +401,7 @@ extern unsigned long arch_align_stack(unsigned long sp);
401extern void free_init_pages(char *what, unsigned long begin, unsigned long end); 401extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
402 402
403void default_idle(void); 403void default_idle(void);
404bool set_pm_idle_to_default(void);
404 405
405void stop_this_cpu(void *dummy); 406void stop_this_cpu(void *dummy);
406 407
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index fa7b9176b76c..431793e5d484 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -32,6 +32,22 @@ extern int no_timer_check;
32 * (mathieu.desnoyers@polymtl.ca) 32 * (mathieu.desnoyers@polymtl.ca)
33 * 33 *
34 * -johnstul@us.ibm.com "math is hard, lets go shopping!" 34 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
35 *
36 * In:
37 *
38 * ns = cycles * cyc2ns_scale / SC
39 *
40 * Although we may still have enough bits to store the value of ns,
41 * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
42 * leading to an incorrect result.
43 *
44 * To avoid this, we can decompose 'cycles' into quotient and remainder
45 * of division by SC. Then,
46 *
47 * ns = (quot * SC + rem) * cyc2ns_scale / SC
48 * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
49 *
50 * - sqazi@google.com
35 */ 51 */
36 52
37DECLARE_PER_CPU(unsigned long, cyc2ns); 53DECLARE_PER_CPU(unsigned long, cyc2ns);
@@ -41,9 +57,14 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
41 57
42static inline unsigned long long __cycles_2_ns(unsigned long long cyc) 58static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
43{ 59{
60 unsigned long long quot;
61 unsigned long long rem;
44 int cpu = smp_processor_id(); 62 int cpu = smp_processor_id();
45 unsigned long long ns = per_cpu(cyc2ns_offset, cpu); 63 unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
46 ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR; 64 quot = (cyc >> CYC2NS_SCALE_FACTOR);
65 rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
66 ns += quot * per_cpu(cyc2ns, cpu) +
67 ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
47 return ns; 68 return ns;
48} 69}
49 70
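
The quotient/remainder split added above can be checked with plain integer arithmetic. A standalone sketch, assuming SC is 1 << CYC2NS_SCALE_FACTOR as in this header: both forms are mathematically equal, but only the decomposed one keeps every intermediate product inside 64 bits once the cycle count grows past 2^64 / scale (a few hundred days of uptime at GHz rates).

#include <linux/types.h>

static u64 cycles_to_ns_sketch(u64 cycles, u64 scale, int shift)
{
	u64 quot = cycles >> shift;			/* cycles / SC */
	u64 rem  = cycles & ((1ULL << shift) - 1);	/* cycles % SC */

	/* quot * scale is already close to the final ns value and
	 * rem < SC, so neither product can overflow. */
	return quot * scale + ((rem * scale) >> shift);
}
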
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 10474fb1185d..cf1d73643f60 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -57,6 +57,7 @@
57 57
58#define UV1_HUB_PART_NUMBER 0x88a5 58#define UV1_HUB_PART_NUMBER 0x88a5
59#define UV2_HUB_PART_NUMBER 0x8eb8 59#define UV2_HUB_PART_NUMBER 0x8eb8
60#define UV2_HUB_PART_NUMBER_X 0x1111
60 61
61/* Compat: if this #define is present, UV headers support UV2 */ 62/* Compat: if this #define is present, UV headers support UV2 */
62#define UV2_HUB_IS_SUPPORTED 1 63#define UV2_HUB_IS_SUPPORTED 1
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index d3d859035af9..1971e652d24b 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -152,6 +152,7 @@ struct x86_cpuinit_ops {
152/** 152/**
153 * struct x86_platform_ops - platform specific runtime functions 153 * struct x86_platform_ops - platform specific runtime functions
154 * @calibrate_tsc: calibrate TSC 154 * @calibrate_tsc: calibrate TSC
155 * @wallclock_init: init the wallclock device
155 * @get_wallclock: get time from HW clock like RTC etc. 156 * @get_wallclock: get time from HW clock like RTC etc.
156 * @set_wallclock: set time back to HW clock 157 * @set_wallclock: set time back to HW clock
157 * @is_untracked_pat_range exclude from PAT logic 158 * @is_untracked_pat_range exclude from PAT logic
@@ -160,11 +161,13 @@ struct x86_cpuinit_ops {
160 */ 161 */
161struct x86_platform_ops { 162struct x86_platform_ops {
162 unsigned long (*calibrate_tsc)(void); 163 unsigned long (*calibrate_tsc)(void);
164 void (*wallclock_init)(void);
163 unsigned long (*get_wallclock)(void); 165 unsigned long (*get_wallclock)(void);
164 int (*set_wallclock)(unsigned long nowtime); 166 int (*set_wallclock)(unsigned long nowtime);
165 void (*iommu_shutdown)(void); 167 void (*iommu_shutdown)(void);
166 bool (*is_untracked_pat_range)(u64 start, u64 end); 168 bool (*is_untracked_pat_range)(u64 start, u64 end);
167 void (*nmi_init)(void); 169 void (*nmi_init)(void);
170 unsigned char (*get_nmi_reason)(void);
168 int (*i8042_detect)(void); 171 int (*i8042_detect)(void);
169}; 172};
170 173
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c63822816249..1f84794f0759 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -738,5 +738,5 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
738 738
739 atomic_set(&stop_machine_first, 1); 739 atomic_set(&stop_machine_first, 1);
740 wrote_text = 0; 740 wrote_text = 0;
741 __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); 741 __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
742} 742}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a2fd72e0ab35..f98d84caf94c 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -186,7 +186,7 @@ static struct resource lapic_resource = {
186 .flags = IORESOURCE_MEM | IORESOURCE_BUSY, 186 .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
187}; 187};
188 188
189static unsigned int calibration_result; 189unsigned int lapic_timer_frequency = 0;
190 190
191static void apic_pm_activate(void); 191static void apic_pm_activate(void);
192 192
@@ -454,7 +454,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
454 switch (mode) { 454 switch (mode) {
455 case CLOCK_EVT_MODE_PERIODIC: 455 case CLOCK_EVT_MODE_PERIODIC:
456 case CLOCK_EVT_MODE_ONESHOT: 456 case CLOCK_EVT_MODE_ONESHOT:
457 __setup_APIC_LVTT(calibration_result, 457 __setup_APIC_LVTT(lapic_timer_frequency,
458 mode != CLOCK_EVT_MODE_PERIODIC, 1); 458 mode != CLOCK_EVT_MODE_PERIODIC, 1);
459 break; 459 break;
460 case CLOCK_EVT_MODE_UNUSED: 460 case CLOCK_EVT_MODE_UNUSED:
@@ -638,6 +638,25 @@ static int __init calibrate_APIC_clock(void)
638 long delta, deltatsc; 638 long delta, deltatsc;
639 int pm_referenced = 0; 639 int pm_referenced = 0;
640 640
641 /**
642 * check if lapic timer has already been calibrated by platform
643 * specific routine, such as tsc calibration code. if so, we just fill
644 * in the clockevent structure and return.
645 */
646
647 if (lapic_timer_frequency) {
648 apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
649 lapic_timer_frequency);
650 lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
651 TICK_NSEC, lapic_clockevent.shift);
652 lapic_clockevent.max_delta_ns =
653 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
654 lapic_clockevent.min_delta_ns =
655 clockevent_delta2ns(0xF, &lapic_clockevent);
656 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
657 return 0;
658 }
659
641 local_irq_disable(); 660 local_irq_disable();
642 661
643 /* Replace the global interrupt handler */ 662 /* Replace the global interrupt handler */
@@ -679,12 +698,12 @@ static int __init calibrate_APIC_clock(void)
679 lapic_clockevent.min_delta_ns = 698 lapic_clockevent.min_delta_ns =
680 clockevent_delta2ns(0xF, &lapic_clockevent); 699 clockevent_delta2ns(0xF, &lapic_clockevent);
681 700
682 calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS; 701 lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
683 702
684 apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta); 703 apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
685 apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult); 704 apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
686 apic_printk(APIC_VERBOSE, "..... calibration result: %u\n", 705 apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
687 calibration_result); 706 lapic_timer_frequency);
688 707
689 if (cpu_has_tsc) { 708 if (cpu_has_tsc) {
690 apic_printk(APIC_VERBOSE, "..... CPU clock speed is " 709 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
@@ -695,13 +714,13 @@ static int __init calibrate_APIC_clock(void)
695 714
696 apic_printk(APIC_VERBOSE, "..... host bus clock speed is " 715 apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
697 "%u.%04u MHz.\n", 716 "%u.%04u MHz.\n",
698 calibration_result / (1000000 / HZ), 717 lapic_timer_frequency / (1000000 / HZ),
699 calibration_result % (1000000 / HZ)); 718 lapic_timer_frequency % (1000000 / HZ));
700 719
701 /* 720 /*
702 * Do a sanity check on the APIC calibration result 721 * Do a sanity check on the APIC calibration result
703 */ 722 */
704 if (calibration_result < (1000000 / HZ)) { 723 if (lapic_timer_frequency < (1000000 / HZ)) {
705 local_irq_enable(); 724 local_irq_enable();
706 pr_warning("APIC frequency too slow, disabling apic timer\n"); 725 pr_warning("APIC frequency too slow, disabling apic timer\n");
707 return -1; 726 return -1;
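
For context, a hedged sketch of the platform side that motivates the early-out above: a SoC whose bus clock is known from firmware can fill in lapic_timer_frequency during its own TSC calibration so calibrate_APIC_clock() skips the PIT/PM-timer measurement. The function name below is illustrative; the PENWELL_FSB_FREQ_* constants come from the asm/mrst.h hunk earlier in this diff.

#include <asm/apic.h>
#include <asm/mrst.h>

static unsigned long __init penwell_calibrate_sketch(void)
{
	unsigned long fsb = PENWELL_FSB_FREQ_100SKU;	/* kHz */

	/* The local APIC timer runs at the bus clock, so the
	 * ticks-per-jiffy value can be derived from the SKU table. */
	lapic_timer_frequency = fsb * 1000 / HZ;

	return 0;	/* a real hook would also return the TSC rate in kHz */
}
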
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 3c31fa98af6d..6d939d7847e2 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -193,10 +193,8 @@ int __init arch_early_irq_init(void)
193 struct irq_cfg *cfg; 193 struct irq_cfg *cfg;
194 int count, node, i; 194 int count, node, i;
195 195
196 if (!legacy_pic->nr_legacy_irqs) { 196 if (!legacy_pic->nr_legacy_irqs)
197 nr_irqs_gsi = 0;
198 io_apic_irqs = ~0UL; 197 io_apic_irqs = ~0UL;
199 }
200 198
201 for (i = 0; i < nr_ioapics; i++) { 199 for (i = 0; i < nr_ioapics; i++) {
202 ioapics[i].saved_registers = 200 ioapics[i].saved_registers =
@@ -1696,6 +1694,7 @@ __apicdebuginit(void) print_IO_APICs(void)
1696 int ioapic_idx; 1694 int ioapic_idx;
1697 struct irq_cfg *cfg; 1695 struct irq_cfg *cfg;
1698 unsigned int irq; 1696 unsigned int irq;
1697 struct irq_chip *chip;
1699 1698
1700 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); 1699 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1701 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) 1700 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
@@ -1716,6 +1715,10 @@ __apicdebuginit(void) print_IO_APICs(void)
1716 for_each_active_irq(irq) { 1715 for_each_active_irq(irq) {
1717 struct irq_pin_list *entry; 1716 struct irq_pin_list *entry;
1718 1717
1718 chip = irq_get_chip(irq);
1719 if (chip != &ioapic_chip)
1720 continue;
1721
1719 cfg = irq_get_chip_data(irq); 1722 cfg = irq_get_chip_data(irq);
1720 if (!cfg) 1723 if (!cfg)
1721 continue; 1724 continue;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 62ae3001ae02..9d59bbacd4e3 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -93,6 +93,8 @@ static int __init early_get_pnodeid(void)
93 93
94 if (node_id.s.part_number == UV2_HUB_PART_NUMBER) 94 if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
95 uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; 95 uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
96 if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X)
97 uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
96 98
97 uv_hub_info->hub_revision = uv_min_hub_revision_id; 99 uv_hub_info->hub_revision = uv_min_hub_revision_id;
98 pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1); 100 pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c7e46cb35327..0bab2b18bb20 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -442,8 +442,6 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
442 442
443static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) 443static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
444{ 444{
445 u32 dummy;
446
447 early_init_amd_mc(c); 445 early_init_amd_mc(c);
448 446
449 /* 447 /*
@@ -473,12 +471,12 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
473 set_cpu_cap(c, X86_FEATURE_EXTD_APICID); 471 set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
474 } 472 }
475#endif 473#endif
476
477 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
478} 474}
479 475
480static void __cpuinit init_amd(struct cpuinfo_x86 *c) 476static void __cpuinit init_amd(struct cpuinfo_x86 *c)
481{ 477{
478 u32 dummy;
479
482#ifdef CONFIG_SMP 480#ifdef CONFIG_SMP
483 unsigned long long value; 481 unsigned long long value;
484 482
@@ -657,6 +655,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
657 checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask); 655 checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
658 } 656 }
659 } 657 }
658
659 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
660} 660}
661 661
662#ifdef CONFIG_X86_32 662#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 6199232161cf..319882ef848d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -208,7 +208,7 @@ static int inject_init(void)
208 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL)) 208 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
209 return -ENOMEM; 209 return -ENOMEM;
210 printk(KERN_INFO "Machine check injector initialized\n"); 210 printk(KERN_INFO "Machine check injector initialized\n");
211 mce_chrdev_ops.write = mce_write; 211 register_mce_write_callback(mce_write);
212 register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0, 212 register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
213 "mce_notify"); 213 "mce_notify");
214 return 0; 214 return 0;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 362056aefeb4..2af127d4c3d1 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1634,16 +1634,35 @@ static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
1634 } 1634 }
1635} 1635}
1636 1636
1637/* Modified in mce-inject.c, so not static or const */ 1637static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
1638struct file_operations mce_chrdev_ops = { 1638 size_t usize, loff_t *off);
1639
1640void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
1641 const char __user *ubuf,
1642 size_t usize, loff_t *off))
1643{
1644 mce_write = fn;
1645}
1646EXPORT_SYMBOL_GPL(register_mce_write_callback);
1647
1648ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
1649 size_t usize, loff_t *off)
1650{
1651 if (mce_write)
1652 return mce_write(filp, ubuf, usize, off);
1653 else
1654 return -EINVAL;
1655}
1656
1657static const struct file_operations mce_chrdev_ops = {
1639 .open = mce_chrdev_open, 1658 .open = mce_chrdev_open,
1640 .release = mce_chrdev_release, 1659 .release = mce_chrdev_release,
1641 .read = mce_chrdev_read, 1660 .read = mce_chrdev_read,
1661 .write = mce_chrdev_write,
1642 .poll = mce_chrdev_poll, 1662 .poll = mce_chrdev_poll,
1643 .unlocked_ioctl = mce_chrdev_ioctl, 1663 .unlocked_ioctl = mce_chrdev_ioctl,
1644 .llseek = no_llseek, 1664 .llseek = no_llseek,
1645}; 1665};
1646EXPORT_SYMBOL_GPL(mce_chrdev_ops);
1647 1666
1648static struct miscdevice mce_chrdev_device = { 1667static struct miscdevice mce_chrdev_device = {
1649 MISC_MCELOG_MINOR, 1668 MISC_MCELOG_MINOR,
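
This change replaces live patching of a globally exported file_operations with a narrow registration hook: mce_chrdev_ops becomes static const, and a module that wants to feed records into /dev/mcelog only supplies a write handler. A sketch of a consumer, mirroring the mce-inject.c hunk above (names here are hypothetical):

#include <linux/fs.h>
#include <asm/mce.h>

static ssize_t my_mce_write(struct file *filp, const char __user *ubuf,
			    size_t usize, loff_t *off)
{
	/* parse a struct mce from userspace and inject it ... */
	return usize;
}

static int __init my_injector_init(void)
{
	register_mce_write_callback(my_mce_write);
	return 0;
}
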
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index a71efcdbb092..97b26356e9ee 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -547,6 +547,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
547 547
548 if (tmp != mask_lo) { 548 if (tmp != mask_lo) {
549 printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n"); 549 printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
550 add_taint(TAINT_FIRMWARE_WORKAROUND);
550 mask_lo = tmp; 551 mask_lo = tmp;
551 } 552 }
552 } 553 }
@@ -693,6 +694,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
693 694
694 /* Disable MTRRs, and set the default type to uncached */ 695 /* Disable MTRRs, and set the default type to uncached */
695 mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); 696 mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
697 wbinvd();
696} 698}
697 699
698static void post_set(void) __releases(set_atomicity_lock) 700static void post_set(void) __releases(set_atomicity_lock)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 640891014b2a..2bda212a0010 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -312,12 +312,8 @@ int x86_setup_perfctr(struct perf_event *event)
312 return -EOPNOTSUPP; 312 return -EOPNOTSUPP;
313 } 313 }
314 314
315 /*
316 * Do not allow config1 (extended registers) to propagate,
317 * there's no sane user-space generalization yet:
318 */
319 if (attr->type == PERF_TYPE_RAW) 315 if (attr->type == PERF_TYPE_RAW)
320 return 0; 316 return x86_pmu_extra_regs(event->attr.config, event);
321 317
322 if (attr->type == PERF_TYPE_HW_CACHE) 318 if (attr->type == PERF_TYPE_HW_CACHE)
323 return set_ext_hw_attr(hwc, event); 319 return set_ext_hw_attr(hwc, event);
@@ -588,7 +584,7 @@ done:
588 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]); 584 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
589 } 585 }
590 } 586 }
591 return num ? -ENOSPC : 0; 587 return num ? -EINVAL : 0;
592} 588}
593 589
594/* 590/*
@@ -607,7 +603,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
607 603
608 if (is_x86_event(leader)) { 604 if (is_x86_event(leader)) {
609 if (n >= max_count) 605 if (n >= max_count)
610 return -ENOSPC; 606 return -EINVAL;
611 cpuc->event_list[n] = leader; 607 cpuc->event_list[n] = leader;
612 n++; 608 n++;
613 } 609 }
@@ -620,7 +616,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
620 continue; 616 continue;
621 617
622 if (n >= max_count) 618 if (n >= max_count)
623 return -ENOSPC; 619 return -EINVAL;
624 620
625 cpuc->event_list[n] = event; 621 cpuc->event_list[n] = event;
626 n++; 622 n++;
@@ -1316,7 +1312,7 @@ static int validate_event(struct perf_event *event)
1316 c = x86_pmu.get_event_constraints(fake_cpuc, event); 1312 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1317 1313
1318 if (!c || !c->weight) 1314 if (!c || !c->weight)
1319 ret = -ENOSPC; 1315 ret = -EINVAL;
1320 1316
1321 if (x86_pmu.put_event_constraints) 1317 if (x86_pmu.put_event_constraints)
1322 x86_pmu.put_event_constraints(fake_cpuc, event); 1318 x86_pmu.put_event_constraints(fake_cpuc, event);
@@ -1341,7 +1337,7 @@ static int validate_group(struct perf_event *event)
1341{ 1337{
1342 struct perf_event *leader = event->group_leader; 1338 struct perf_event *leader = event->group_leader;
1343 struct cpu_hw_events *fake_cpuc; 1339 struct cpu_hw_events *fake_cpuc;
1344 int ret = -ENOSPC, n; 1340 int ret = -EINVAL, n;
1345 1341
1346 fake_cpuc = allocate_fake_cpuc(); 1342 fake_cpuc = allocate_fake_cpuc();
1347 if (IS_ERR(fake_cpuc)) 1343 if (IS_ERR(fake_cpuc))
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index ab6343d21825..3b8a2d30d14e 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -199,8 +199,7 @@ static int force_ibs_eilvt_setup(void)
199 goto out; 199 goto out;
200 } 200 }
201 201
202 pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset); 202 pr_info("IBS: LVT offset %d assigned\n", offset);
203 pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
204 203
205 return 0; 204 return 0;
206out: 205out:
@@ -265,19 +264,23 @@ perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *h
265static __init int amd_ibs_init(void) 264static __init int amd_ibs_init(void)
266{ 265{
267 u32 caps; 266 u32 caps;
268 int ret; 267 int ret = -EINVAL;
269 268
270 caps = __get_ibs_caps(); 269 caps = __get_ibs_caps();
271 if (!caps) 270 if (!caps)
272 return -ENODEV; /* ibs not supported by the cpu */ 271 return -ENODEV; /* ibs not supported by the cpu */
273 272
274 if (!ibs_eilvt_valid()) { 273 /*
275 ret = force_ibs_eilvt_setup(); 274 * Force LVT offset assignment for family 10h: The offsets are
276 if (ret) { 275 * not assigned by the BIOS for this family, so the OS is
277 pr_err("Failed to setup IBS, %d\n", ret); 276 * responsible for doing it. If the OS assignment fails, fall
278 return ret; 277 * back to BIOS settings and try to setup this.
279 } 278 */
280 } 279 if (boot_cpu_data.x86 == 0x10)
280 force_ibs_eilvt_setup();
281
282 if (!ibs_eilvt_valid())
283 goto out;
281 284
282 get_online_cpus(); 285 get_online_cpus();
283 ibs_caps = caps; 286 ibs_caps = caps;
@@ -287,7 +290,11 @@ static __init int amd_ibs_init(void)
287 smp_call_function(setup_APIC_ibs, NULL, 1); 290 smp_call_function(setup_APIC_ibs, NULL, 1);
288 put_online_cpus(); 291 put_online_cpus();
289 292
290 return perf_event_ibs_init(); 293 ret = perf_event_ibs_init();
294out:
295 if (ret)
296 pr_err("Failed to setup IBS, %d\n", ret);
297 return ret;
291} 298}
292 299
293/* Since we need the pci subsystem to init ibs we can't do this earlier: */ 300/* Since we need the pci subsystem to init ibs we can't do this earlier: */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 2be5ebe99872..8d601b18bf9f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1545,6 +1545,13 @@ static void intel_clovertown_quirks(void)
1545 x86_pmu.pebs_constraints = NULL; 1545 x86_pmu.pebs_constraints = NULL;
1546} 1546}
1547 1547
1548static void intel_sandybridge_quirks(void)
1549{
1550 printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
1551 x86_pmu.pebs = 0;
1552 x86_pmu.pebs_constraints = NULL;
1553}
1554
1548__init int intel_pmu_init(void) 1555__init int intel_pmu_init(void)
1549{ 1556{
1550 union cpuid10_edx edx; 1557 union cpuid10_edx edx;
@@ -1694,6 +1701,7 @@ __init int intel_pmu_init(void)
1694 break; 1701 break;
1695 1702
1696 case 42: /* SandyBridge */ 1703 case 42: /* SandyBridge */
1704 x86_pmu.quirks = intel_sandybridge_quirks;
1697 case 45: /* SandyBridge, "Romely-EP" */ 1705 case 45: /* SandyBridge, "Romely-EP" */
1698 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 1706 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
1699 sizeof(hw_cache_event_ids)); 1707 sizeof(hw_cache_event_ids));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index c0d238f49db8..73da6b64f5b7 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -493,6 +493,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
493 unsigned long from = cpuc->lbr_entries[0].from; 493 unsigned long from = cpuc->lbr_entries[0].from;
494 unsigned long old_to, to = cpuc->lbr_entries[0].to; 494 unsigned long old_to, to = cpuc->lbr_entries[0].to;
495 unsigned long ip = regs->ip; 495 unsigned long ip = regs->ip;
496 int is_64bit = 0;
496 497
497 /* 498 /*
498 * We don't need to fixup if the PEBS assist is fault like 499 * We don't need to fixup if the PEBS assist is fault like
@@ -544,7 +545,10 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
544 } else 545 } else
545 kaddr = (void *)to; 546 kaddr = (void *)to;
546 547
547 kernel_insn_init(&insn, kaddr); 548#ifdef CONFIG_X86_64
549 is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
550#endif
551 insn_init(&insn, kaddr, is_64bit);
548 insn_get_length(&insn); 552 insn_get_length(&insn);
549 to += insn.length; 553 to += insn.length;
550 } while (to < ip); 554 } while (to < ip);
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 492bf1358a7c..ef484d9d0a25 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -1268,7 +1268,7 @@ reserve:
1268 } 1268 }
1269 1269
1270done: 1270done:
1271 return num ? -ENOSPC : 0; 1271 return num ? -EINVAL : 0;
1272} 1272}
1273 1273
1274static __initconst const struct x86_pmu p4_pmu = { 1274static __initconst const struct x86_pmu p4_pmu = {
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index b946a9eac7d9..1bb0bf4d92cd 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1049,6 +1049,14 @@ int hpet_rtc_timer_init(void)
1049} 1049}
1050EXPORT_SYMBOL_GPL(hpet_rtc_timer_init); 1050EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
1051 1051
1052static void hpet_disable_rtc_channel(void)
1053{
1054 unsigned long cfg;
1055 cfg = hpet_readl(HPET_T1_CFG);
1056 cfg &= ~HPET_TN_ENABLE;
1057 hpet_writel(cfg, HPET_T1_CFG);
1058}
1059
1052/* 1060/*
1053 * The functions below are called from rtc driver. 1061 * The functions below are called from rtc driver.
1054 * Return 0 if HPET is not being used. 1062 * Return 0 if HPET is not being used.
@@ -1060,6 +1068,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
1060 return 0; 1068 return 0;
1061 1069
1062 hpet_rtc_flags &= ~bit_mask; 1070 hpet_rtc_flags &= ~bit_mask;
1071 if (unlikely(!hpet_rtc_flags))
1072 hpet_disable_rtc_channel();
1073
1063 return 1; 1074 return 1;
1064} 1075}
1065EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit); 1076EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
@@ -1125,15 +1136,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
1125 1136
1126static void hpet_rtc_timer_reinit(void) 1137static void hpet_rtc_timer_reinit(void)
1127{ 1138{
1128 unsigned int cfg, delta; 1139 unsigned int delta;
1129 int lost_ints = -1; 1140 int lost_ints = -1;
1130 1141
1131 if (unlikely(!hpet_rtc_flags)) { 1142 if (unlikely(!hpet_rtc_flags))
1132 cfg = hpet_readl(HPET_T1_CFG); 1143 hpet_disable_rtc_channel();
1133 cfg &= ~HPET_TN_ENABLE;
1134 hpet_writel(cfg, HPET_T1_CFG);
1135 return;
1136 }
1137 1144
1138 if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) 1145 if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
1139 delta = hpet_default_delta; 1146 delta = hpet_default_delta;
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index acf8fbf8fbda..69bca468c47a 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -38,6 +38,9 @@ static inline void stack_overflow_check(struct pt_regs *regs)
38#ifdef CONFIG_DEBUG_STACKOVERFLOW 38#ifdef CONFIG_DEBUG_STACKOVERFLOW
39 u64 curbase = (u64)task_stack_page(current); 39 u64 curbase = (u64)task_stack_page(current);
40 40
41 if (user_mode_vm(regs))
42 return;
43
41 WARN_ONCE(regs->sp >= curbase && 44 WARN_ONCE(regs->sp >= curbase &&
42 regs->sp <= curbase + THREAD_SIZE && 45 regs->sp <= curbase + THREAD_SIZE &&
43 regs->sp < curbase + sizeof(struct thread_info) + 46 regs->sp < curbase + sizeof(struct thread_info) +
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index c1a0188e29ae..44842d756b29 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -74,9 +74,10 @@ static cycle_t kvm_clock_read(void)
74 struct pvclock_vcpu_time_info *src; 74 struct pvclock_vcpu_time_info *src;
75 cycle_t ret; 75 cycle_t ret;
76 76
77 src = &get_cpu_var(hv_clock); 77 preempt_disable_notrace();
78 src = &__get_cpu_var(hv_clock);
78 ret = pvclock_clocksource_read(src); 79 ret = pvclock_clocksource_read(src);
79 put_cpu_var(hv_clock); 80 preempt_enable_notrace();
80 return ret; 81 return ret;
81} 82}
82 83
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index f2d2a664e797..9d46f5e43b51 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -256,7 +256,7 @@ static int __init microcode_dev_init(void)
256 return 0; 256 return 0;
257} 257}
258 258
259static void microcode_dev_exit(void) 259static void __exit microcode_dev_exit(void)
260{ 260{
261 misc_deregister(&microcode_dev); 261 misc_deregister(&microcode_dev);
262} 262}
@@ -519,10 +519,8 @@ static int __init microcode_init(void)
519 519
520 microcode_pdev = platform_device_register_simple("microcode", -1, 520 microcode_pdev = platform_device_register_simple("microcode", -1,
521 NULL, 0); 521 NULL, 0);
522 if (IS_ERR(microcode_pdev)) { 522 if (IS_ERR(microcode_pdev))
523 microcode_dev_exit();
524 return PTR_ERR(microcode_pdev); 523 return PTR_ERR(microcode_pdev);
525 }
526 524
527 get_online_cpus(); 525 get_online_cpus();
528 mutex_lock(&microcode_mutex); 526 mutex_lock(&microcode_mutex);
@@ -532,14 +530,12 @@ static int __init microcode_init(void)
532 mutex_unlock(&microcode_mutex); 530 mutex_unlock(&microcode_mutex);
533 put_online_cpus(); 531 put_online_cpus();
534 532
535 if (error) { 533 if (error)
536 platform_device_unregister(microcode_pdev); 534 goto out_pdev;
537 return error;
538 }
539 535
540 error = microcode_dev_init(); 536 error = microcode_dev_init();
541 if (error) 537 if (error)
542 return error; 538 goto out_sysdev_driver;
543 539
544 register_syscore_ops(&mc_syscore_ops); 540 register_syscore_ops(&mc_syscore_ops);
545 register_hotcpu_notifier(&mc_cpu_notifier); 541 register_hotcpu_notifier(&mc_cpu_notifier);
@@ -548,6 +544,20 @@ static int __init microcode_init(void)
548 " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n"); 544 " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
549 545
550 return 0; 546 return 0;
547
548out_sysdev_driver:
549 get_online_cpus();
550 mutex_lock(&microcode_mutex);
551
552 sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
553
554 mutex_unlock(&microcode_mutex);
555 put_online_cpus();
556
557out_pdev:
558 platform_device_unregister(microcode_pdev);
559 return error;
560
551} 561}
552module_init(microcode_init); 562module_init(microcode_init);
553 563
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 9103b89c145a..0741b062a304 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_bus *m)
95 } 95 }
96#endif 96#endif
97 97
98 set_bit(m->busid, mp_bus_not_pci);
98 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { 99 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
99 set_bit(m->busid, mp_bus_not_pci);
100#if defined(CONFIG_EISA) || defined(CONFIG_MCA) 100#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
101 mp_bus_id_to_type[m->busid] = MP_BUS_ISA; 101 mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
102#endif 102#endif
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index b9c8628974af..e88f37b58ddd 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -29,6 +29,7 @@
29#include <asm/traps.h> 29#include <asm/traps.h>
30#include <asm/mach_traps.h> 30#include <asm/mach_traps.h>
31#include <asm/nmi.h> 31#include <asm/nmi.h>
32#include <asm/x86_init.h>
32 33
33#define NMI_MAX_NAMELEN 16 34#define NMI_MAX_NAMELEN 16
34struct nmiaction { 35struct nmiaction {
@@ -348,7 +349,7 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
348 349
349 /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */ 350 /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
350 raw_spin_lock(&nmi_reason_lock); 351 raw_spin_lock(&nmi_reason_lock);
351 reason = get_nmi_reason(); 352 reason = x86_platform.get_nmi_reason();
352 353
353 if (reason & NMI_REASON_MASK) { 354 if (reason & NMI_REASON_MASK) {
354 if (reason & NMI_REASON_SERR) 355 if (reason & NMI_REASON_SERR)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index b9b3b1a51643..ee5d4fbd53b4 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -403,6 +403,14 @@ void default_idle(void)
403EXPORT_SYMBOL(default_idle); 403EXPORT_SYMBOL(default_idle);
404#endif 404#endif
405 405
406bool set_pm_idle_to_default(void)
407{
408 bool ret = !!pm_idle;
409
410 pm_idle = default_idle;
411
412 return ret;
413}
406void stop_this_cpu(void *dummy) 414void stop_this_cpu(void *dummy)
407{ 415{
408 local_irq_disable(); 416 local_irq_disable();
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index b78643d0f9a5..03920a15a632 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -553,4 +553,17 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
553 quirk_amd_nb_node); 553 quirk_amd_nb_node);
554DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK, 554DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
555 quirk_amd_nb_node); 555 quirk_amd_nb_node);
556DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
557 quirk_amd_nb_node);
558DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
559 quirk_amd_nb_node);
560DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
561 quirk_amd_nb_node);
562DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
563 quirk_amd_nb_node);
564DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
565 quirk_amd_nb_node);
566DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
567 quirk_amd_nb_node);
568
556#endif 569#endif
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e334be1182b9..37a458b521a6 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -124,7 +124,7 @@ __setup("reboot=", reboot_setup);
124 */ 124 */
125 125
126/* 126/*
127 * Some machines require the "reboot=b" commandline option, 127 * Some machines require the "reboot=b" or "reboot=k" commandline options,
128 * this quirk makes that automatic. 128 * this quirk makes that automatic.
129 */ 129 */
130static int __init set_bios_reboot(const struct dmi_system_id *d) 130static int __init set_bios_reboot(const struct dmi_system_id *d)
@@ -136,6 +136,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
136 return 0; 136 return 0;
137} 137}
138 138
139static int __init set_kbd_reboot(const struct dmi_system_id *d)
140{
141 if (reboot_type != BOOT_KBD) {
142 reboot_type = BOOT_KBD;
143 printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident);
144 }
145 return 0;
146}
147
139static struct dmi_system_id __initdata reboot_dmi_table[] = { 148static struct dmi_system_id __initdata reboot_dmi_table[] = {
140 { /* Handle problems with rebooting on Dell E520's */ 149 { /* Handle problems with rebooting on Dell E520's */
141 .callback = set_bios_reboot, 150 .callback = set_bios_reboot,
@@ -295,7 +304,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
295 }, 304 },
296 }, 305 },
297 { /* Handle reboot issue on Acer Aspire one */ 306 { /* Handle reboot issue on Acer Aspire one */
298 .callback = set_bios_reboot, 307 .callback = set_kbd_reboot,
299 .ident = "Acer Aspire One A110", 308 .ident = "Acer Aspire One A110",
300 .matches = { 309 .matches = {
301 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 310 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -443,6 +452,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
443 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), 452 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
444 }, 453 },
445 }, 454 },
455 { /* Handle problems with rebooting on the OptiPlex 990. */
456 .callback = set_pci_reboot,
457 .ident = "Dell OptiPlex 990",
458 .matches = {
459 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
460 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
461 },
462 },
446 { } 463 { }
447}; 464};
448 465
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 348ce016a835..af6db6ec5b2a 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -12,6 +12,7 @@
12#include <asm/vsyscall.h> 12#include <asm/vsyscall.h>
13#include <asm/x86_init.h> 13#include <asm/x86_init.h>
14#include <asm/time.h> 14#include <asm/time.h>
15#include <asm/mrst.h>
15 16
16#ifdef CONFIG_X86_32 17#ifdef CONFIG_X86_32
17/* 18/*
@@ -242,6 +243,10 @@ static __init int add_rtc_cmos(void)
242 if (of_have_populated_dt()) 243 if (of_have_populated_dt())
243 return 0; 244 return 0;
244 245
246 /* Intel MID platforms don't have ioport rtc */
247 if (mrst_identify_cpu())
248 return -ENODEV;
249
245 platform_device_register(&rtc_device); 250 platform_device_register(&rtc_device);
246 dev_info(&rtc_device.dev, 251 dev_info(&rtc_device.dev,
247 "registered platform RTC device (no PNP device found)\n"); 252 "registered platform RTC device (no PNP device found)\n");
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index afaf38447ef5..cf0ef986cb6d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1045,6 +1045,8 @@ void __init setup_arch(char **cmdline_p)
1045 1045
1046 x86_init.timers.wallclock_init(); 1046 x86_init.timers.wallclock_init();
1047 1047
1048 x86_platform.wallclock_init();
1049
1048 mcheck_init(); 1050 mcheck_init();
1049 1051
1050 arch_init_ideal_nops(); 1052 arch_init_ideal_nops();
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 6f164bd5e14d..c1d6cd549397 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -21,12 +21,14 @@
21#include <asm/pat.h> 21#include <asm/pat.h>
22#include <asm/tsc.h> 22#include <asm/tsc.h>
23#include <asm/iommu.h> 23#include <asm/iommu.h>
24#include <asm/mach_traps.h>
24 25
25void __cpuinit x86_init_noop(void) { } 26void __cpuinit x86_init_noop(void) { }
26void __init x86_init_uint_noop(unsigned int unused) { } 27void __init x86_init_uint_noop(unsigned int unused) { }
27void __init x86_init_pgd_noop(pgd_t *unused) { } 28void __init x86_init_pgd_noop(pgd_t *unused) { }
28int __init iommu_init_noop(void) { return 0; } 29int __init iommu_init_noop(void) { return 0; }
29void iommu_shutdown_noop(void) { } 30void iommu_shutdown_noop(void) { }
31void wallclock_init_noop(void) { }
30 32
31/* 33/*
32 * The platform setup functions are preset with the default functions 34 * The platform setup functions are preset with the default functions
@@ -97,11 +99,13 @@ static int default_i8042_detect(void) { return 1; };
97 99
98struct x86_platform_ops x86_platform = { 100struct x86_platform_ops x86_platform = {
99 .calibrate_tsc = native_calibrate_tsc, 101 .calibrate_tsc = native_calibrate_tsc,
102 .wallclock_init = wallclock_init_noop,
100 .get_wallclock = mach_get_cmos_time, 103 .get_wallclock = mach_get_cmos_time,
101 .set_wallclock = mach_set_rtc_mmss, 104 .set_wallclock = mach_set_rtc_mmss,
102 .iommu_shutdown = iommu_shutdown_noop, 105 .iommu_shutdown = iommu_shutdown_noop,
103 .is_untracked_pat_range = is_ISA_range, 106 .is_untracked_pat_range = is_ISA_range,
104 .nmi_init = default_nmi_init, 107 .nmi_init = default_nmi_init,
108 .get_nmi_reason = default_get_nmi_reason,
105 .i8042_detect = default_i8042_detect 109 .i8042_detect = default_i8042_detect
106}; 110};
107 111
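The x86_init.c hunk adds wallclock_init with a no-op default, so a platform that needs early RTC setup (Moorestown's vRTC, hooked up later in this series) can override the pointer while everyone else keeps a valid function and callers never test for NULL. A hedged sketch of that "ops struct preset with no-op defaults" idiom, using made-up names:

#include <stdio.h>

struct platform_ops {
	void (*wallclock_init)(void);
	void (*power_off)(void);
};

static void noop(void) { }              /* safe default: does nothing */

static void mid_wallclock_init(void)    /* platform-specific override */
{
	printf("vRTC wallclock initialised\n");
}

static struct platform_ops ops = {
	.wallclock_init = noop,         /* every hook starts out valid */
	.power_off      = noop,
};

int main(void)
{
	ops.wallclock_init = mid_wallclock_init;  /* early platform setup */
	ops.wallclock_init();                     /* no NULL check needed */
	ops.power_off();
	return 0;
}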
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a0d6bd9ad442..579a0b51696a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -39,6 +39,7 @@
39#include <asm/mce.h> 39#include <asm/mce.h>
40#include <asm/i387.h> 40#include <asm/i387.h>
41#include <asm/xcr.h> 41#include <asm/xcr.h>
42#include <asm/perf_event.h>
42 43
43#include "trace.h" 44#include "trace.h"
44 45
@@ -118,7 +119,7 @@ module_param(ple_gap, int, S_IRUGO);
118static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; 119static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
119module_param(ple_window, int, S_IRUGO); 120module_param(ple_window, int, S_IRUGO);
120 121
121#define NR_AUTOLOAD_MSRS 1 122#define NR_AUTOLOAD_MSRS 8
122#define VMCS02_POOL_SIZE 1 123#define VMCS02_POOL_SIZE 1
123 124
124struct vmcs { 125struct vmcs {
@@ -622,6 +623,7 @@ static unsigned long *vmx_msr_bitmap_legacy;
622static unsigned long *vmx_msr_bitmap_longmode; 623static unsigned long *vmx_msr_bitmap_longmode;
623 624
624static bool cpu_has_load_ia32_efer; 625static bool cpu_has_load_ia32_efer;
626static bool cpu_has_load_perf_global_ctrl;
625 627
626static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); 628static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
627static DEFINE_SPINLOCK(vmx_vpid_lock); 629static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -1191,15 +1193,34 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1191 vmcs_write32(EXCEPTION_BITMAP, eb); 1193 vmcs_write32(EXCEPTION_BITMAP, eb);
1192} 1194}
1193 1195
1196static void clear_atomic_switch_msr_special(unsigned long entry,
1197 unsigned long exit)
1198{
1199 vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
1200 vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
1201}
1202
1194static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) 1203static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1195{ 1204{
1196 unsigned i; 1205 unsigned i;
1197 struct msr_autoload *m = &vmx->msr_autoload; 1206 struct msr_autoload *m = &vmx->msr_autoload;
1198 1207
1199 if (msr == MSR_EFER && cpu_has_load_ia32_efer) { 1208 switch (msr) {
1200 vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER); 1209 case MSR_EFER:
1201 vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER); 1210 if (cpu_has_load_ia32_efer) {
1202 return; 1211 clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
1212 VM_EXIT_LOAD_IA32_EFER);
1213 return;
1214 }
1215 break;
1216 case MSR_CORE_PERF_GLOBAL_CTRL:
1217 if (cpu_has_load_perf_global_ctrl) {
1218 clear_atomic_switch_msr_special(
1219 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1220 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1221 return;
1222 }
1223 break;
1203 } 1224 }
1204 1225
1205 for (i = 0; i < m->nr; ++i) 1226 for (i = 0; i < m->nr; ++i)
@@ -1215,25 +1236,55 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1215 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); 1236 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1216} 1237}
1217 1238
1239static void add_atomic_switch_msr_special(unsigned long entry,
1240 unsigned long exit, unsigned long guest_val_vmcs,
1241 unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
1242{
1243 vmcs_write64(guest_val_vmcs, guest_val);
1244 vmcs_write64(host_val_vmcs, host_val);
1245 vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
1246 vmcs_set_bits(VM_EXIT_CONTROLS, exit);
1247}
1248
1218static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, 1249static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1219 u64 guest_val, u64 host_val) 1250 u64 guest_val, u64 host_val)
1220{ 1251{
1221 unsigned i; 1252 unsigned i;
1222 struct msr_autoload *m = &vmx->msr_autoload; 1253 struct msr_autoload *m = &vmx->msr_autoload;
1223 1254
1224 if (msr == MSR_EFER && cpu_has_load_ia32_efer) { 1255 switch (msr) {
1225 vmcs_write64(GUEST_IA32_EFER, guest_val); 1256 case MSR_EFER:
1226 vmcs_write64(HOST_IA32_EFER, host_val); 1257 if (cpu_has_load_ia32_efer) {
1227 vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER); 1258 add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
1228 vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER); 1259 VM_EXIT_LOAD_IA32_EFER,
1229 return; 1260 GUEST_IA32_EFER,
1261 HOST_IA32_EFER,
1262 guest_val, host_val);
1263 return;
1264 }
1265 break;
1266 case MSR_CORE_PERF_GLOBAL_CTRL:
1267 if (cpu_has_load_perf_global_ctrl) {
1268 add_atomic_switch_msr_special(
1269 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1270 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1271 GUEST_IA32_PERF_GLOBAL_CTRL,
1272 HOST_IA32_PERF_GLOBAL_CTRL,
1273 guest_val, host_val);
1274 return;
1275 }
1276 break;
1230 } 1277 }
1231 1278
1232 for (i = 0; i < m->nr; ++i) 1279 for (i = 0; i < m->nr; ++i)
1233 if (m->guest[i].index == msr) 1280 if (m->guest[i].index == msr)
1234 break; 1281 break;
1235 1282
1236 if (i == m->nr) { 1283 if (i == NR_AUTOLOAD_MSRS) {
1284 printk_once(KERN_WARNING"Not enough msr switch entries. "
1285 "Can't add msr %x\n", msr);
1286 return;
1287 } else if (i == m->nr) {
1237 ++m->nr; 1288 ++m->nr;
1238 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); 1289 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1239 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); 1290 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
@@ -2455,6 +2506,42 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
2455 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, 2506 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2456 VM_EXIT_LOAD_IA32_EFER); 2507 VM_EXIT_LOAD_IA32_EFER);
2457 2508
2509 cpu_has_load_perf_global_ctrl =
2510 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
2511 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
2512 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2513 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2514
2515 /*
2516 * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
2517 * but due to errata below it can't be used. Workaround is to use
2518 * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
2519 *
2520 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
2521 *
2522 * AAK155 (model 26)
2523 * AAP115 (model 30)
2524 * AAT100 (model 37)
2525 * BC86,AAY89,BD102 (model 44)
2526 * BA97 (model 46)
2527 *
2528 */
2529 if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
2530 switch (boot_cpu_data.x86_model) {
2531 case 26:
2532 case 30:
2533 case 37:
2534 case 44:
2535 case 46:
2536 cpu_has_load_perf_global_ctrl = false;
2537 printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
2538 "does not work properly. Using workaround\n");
2539 break;
2540 default:
2541 break;
2542 }
2543 }
2544
2458 return 0; 2545 return 0;
2459} 2546}
2460 2547
@@ -5968,6 +6055,24 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
5968 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 6055 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
5969} 6056}
5970 6057
6058static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
6059{
6060 int i, nr_msrs;
6061 struct perf_guest_switch_msr *msrs;
6062
6063 msrs = perf_guest_get_msrs(&nr_msrs);
6064
6065 if (!msrs)
6066 return;
6067
6068 for (i = 0; i < nr_msrs; i++)
6069 if (msrs[i].host == msrs[i].guest)
6070 clear_atomic_switch_msr(vmx, msrs[i].msr);
6071 else
6072 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
6073 msrs[i].host);
6074}
6075
5971#ifdef CONFIG_X86_64 6076#ifdef CONFIG_X86_64
5972#define R "r" 6077#define R "r"
5973#define Q "q" 6078#define Q "q"
@@ -6017,6 +6122,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
6017 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 6122 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6018 vmx_set_interrupt_shadow(vcpu, 0); 6123 vmx_set_interrupt_shadow(vcpu, 0);
6019 6124
6125 atomic_switch_perf_msrs(vmx);
6126
6020 vmx->__launched = vmx->loaded_vmcs->launched; 6127 vmx->__launched = vmx->loaded_vmcs->launched;
6021 asm( 6128 asm(
6022 /* Store host registers */ 6129 /* Store host registers */
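The vmx.c changes keep a small guest/host MSR autoload table: if the CPU can load the MSR directly through the VM-entry/exit controls (EFER, and now PERF_GLOBAL_CTRL where the errata above do not apply), the table is bypassed; otherwise the entry is added or updated, bounded by NR_AUTOLOAD_MSRS. Below is a minimal userspace sketch of that bounded add-or-update table; all identifiers and the MSR number are illustrative only.

#include <stdio.h>
#include <stdint.h>

#define NR_AUTOLOAD 8

struct msr_entry { uint32_t index; uint64_t value; };

struct autoload {
	unsigned int nr;
	struct msr_entry guest[NR_AUTOLOAD];
	struct msr_entry host[NR_AUTOLOAD];
};

/* Add or update a guest/host MSR pair, refusing to overflow the table. */
static int add_switch_msr(struct autoload *m, uint32_t msr,
			  uint64_t guest_val, uint64_t host_val)
{
	unsigned int i;

	for (i = 0; i < m->nr; i++)
		if (m->guest[i].index == msr)
			break;

	if (i == NR_AUTOLOAD) {
		fprintf(stderr, "not enough MSR switch entries for %#x\n", msr);
		return -1;
	}
	if (i == m->nr)
		m->nr++;                /* new slot at the end */

	m->guest[i].index = msr;  m->guest[i].value = guest_val;
	m->host[i].index  = msr;  m->host[i].value  = host_val;
	return 0;
}

int main(void)
{
	struct autoload m = { 0 };

	add_switch_msr(&m, 0x38f, 0x7, 0x0);   /* first add creates a slot */
	add_switch_msr(&m, 0x38f, 0x3, 0x0);   /* same MSR: update in place */
	printf("entries in use: %u\n", m.nr);
	return 0;
}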
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index ea305856151c..dd74e46828c0 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
201 do { 201 do {
202 VM_BUG_ON(compound_head(page) != head); 202 VM_BUG_ON(compound_head(page) != head);
203 pages[*nr] = page; 203 pages[*nr] = page;
204 if (PageTail(page))
205 get_huge_page_tail(page);
204 (*nr)++; 206 (*nr)++;
205 page++; 207 page++;
206 refs++; 208 refs++;
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index b49962662101..f4f29b19fac5 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -45,6 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
45 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 45 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
46 BUG_ON(!pte_none(*(kmap_pte-idx))); 46 BUG_ON(!pte_none(*(kmap_pte-idx)));
47 set_pte(kmap_pte-idx, mk_pte(page, prot)); 47 set_pte(kmap_pte-idx, mk_pte(page, prot));
48 arch_flush_lazy_mmu_mode();
48 49
49 return (void *)vaddr; 50 return (void *)vaddr;
50} 51}
@@ -88,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
88 */ 89 */
89 kpte_clear_flush(kmap_pte-idx, vaddr); 90 kpte_clear_flush(kmap_pte-idx, vaddr);
90 kmap_atomic_idx_pop(); 91 kmap_atomic_idx_pop();
92 arch_flush_lazy_mmu_mode();
91 } 93 }
92#ifdef CONFIG_DEBUG_HIGHMEM 94#ifdef CONFIG_DEBUG_HIGHMEM
93 else { 95 else {
diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c
index cdfe4c54deca..f148cf652678 100644
--- a/arch/x86/oprofile/init.c
+++ b/arch/x86/oprofile/init.c
@@ -21,6 +21,7 @@ extern int op_nmi_timer_init(struct oprofile_operations *ops);
21extern void op_nmi_exit(void); 21extern void op_nmi_exit(void);
22extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth); 22extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
23 23
24static int nmi_timer;
24 25
25int __init oprofile_arch_init(struct oprofile_operations *ops) 26int __init oprofile_arch_init(struct oprofile_operations *ops)
26{ 27{
@@ -31,8 +32,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
31#ifdef CONFIG_X86_LOCAL_APIC 32#ifdef CONFIG_X86_LOCAL_APIC
32 ret = op_nmi_init(ops); 33 ret = op_nmi_init(ops);
33#endif 34#endif
35 nmi_timer = (ret != 0);
34#ifdef CONFIG_X86_IO_APIC 36#ifdef CONFIG_X86_IO_APIC
35 if (ret < 0) 37 if (nmi_timer)
36 ret = op_nmi_timer_init(ops); 38 ret = op_nmi_timer_init(ops);
37#endif 39#endif
38 ops->backtrace = x86_backtrace; 40 ops->backtrace = x86_backtrace;
@@ -44,6 +46,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
44void oprofile_arch_exit(void) 46void oprofile_arch_exit(void)
45{ 47{
46#ifdef CONFIG_X86_LOCAL_APIC 48#ifdef CONFIG_X86_LOCAL_APIC
47 op_nmi_exit(); 49 if (!nmi_timer)
50 op_nmi_exit();
48#endif 51#endif
49} 52}
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index 28071bb31db7..4c61b52191eb 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -109,7 +109,7 @@ static __init void sdv_serial_fixup(void)
109} 109}
110 110
111#else 111#else
112static inline void sdv_serial_fixup(void); 112static inline void sdv_serial_fixup(void) {};
113#endif 113#endif
114 114
115static void __init sdv_arch_setup(void) 115static void __init sdv_arch_setup(void)
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index e36bf714cb77..40e446941dd7 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -39,43 +39,14 @@
39 */ 39 */
40 40
41static unsigned long efi_rt_eflags; 41static unsigned long efi_rt_eflags;
42static pgd_t efi_bak_pg_dir_pointer[2];
43 42
44void efi_call_phys_prelog(void) 43void efi_call_phys_prelog(void)
45{ 44{
46 unsigned long cr4;
47 unsigned long temp;
48 struct desc_ptr gdt_descr; 45 struct desc_ptr gdt_descr;
49 46
50 local_irq_save(efi_rt_eflags); 47 local_irq_save(efi_rt_eflags);
51 48
52 /* 49 load_cr3(initial_page_table);
53 * If I don't have PAE, I should just duplicate two entries in page
54 * directory. If I have PAE, I just need to duplicate one entry in
55 * page directory.
56 */
57 cr4 = read_cr4_safe();
58
59 if (cr4 & X86_CR4_PAE) {
60 efi_bak_pg_dir_pointer[0].pgd =
61 swapper_pg_dir[pgd_index(0)].pgd;
62 swapper_pg_dir[0].pgd =
63 swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
64 } else {
65 efi_bak_pg_dir_pointer[0].pgd =
66 swapper_pg_dir[pgd_index(0)].pgd;
67 efi_bak_pg_dir_pointer[1].pgd =
68 swapper_pg_dir[pgd_index(0x400000)].pgd;
69 swapper_pg_dir[pgd_index(0)].pgd =
70 swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
71 temp = PAGE_OFFSET + 0x400000;
72 swapper_pg_dir[pgd_index(0x400000)].pgd =
73 swapper_pg_dir[pgd_index(temp)].pgd;
74 }
75
76 /*
77 * After the lock is released, the original page table is restored.
78 */
79 __flush_tlb_all(); 50 __flush_tlb_all();
80 51
81 gdt_descr.address = __pa(get_cpu_gdt_table(0)); 52 gdt_descr.address = __pa(get_cpu_gdt_table(0));
@@ -85,28 +56,13 @@ void efi_call_phys_prelog(void)
85 56
86void efi_call_phys_epilog(void) 57void efi_call_phys_epilog(void)
87{ 58{
88 unsigned long cr4;
89 struct desc_ptr gdt_descr; 59 struct desc_ptr gdt_descr;
90 60
91 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); 61 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
92 gdt_descr.size = GDT_SIZE - 1; 62 gdt_descr.size = GDT_SIZE - 1;
93 load_gdt(&gdt_descr); 63 load_gdt(&gdt_descr);
94 64
95 cr4 = read_cr4_safe(); 65 load_cr3(swapper_pg_dir);
96
97 if (cr4 & X86_CR4_PAE) {
98 swapper_pg_dir[pgd_index(0)].pgd =
99 efi_bak_pg_dir_pointer[0].pgd;
100 } else {
101 swapper_pg_dir[pgd_index(0)].pgd =
102 efi_bak_pg_dir_pointer[0].pgd;
103 swapper_pg_dir[pgd_index(0x400000)].pgd =
104 efi_bak_pg_dir_pointer[1].pgd;
105 }
106
107 /*
108 * After the lock is released, the original page table is restored.
109 */
110 __flush_tlb_all(); 66 __flush_tlb_all();
111 67
112 local_irq_restore(efi_rt_eflags); 68 local_irq_restore(efi_rt_eflags);
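The rewritten 32-bit EFI bracket no longer hand-copies swapper_pg_dir entries for PAE and non-PAE cases; the prelog simply loads CR3 with initial_page_table, which already provides the low 1:1 mappings the firmware call needs, and the epilog switches back to swapper_pg_dir. A small sketch of that save/swap/restore bracket, with strings standing in for page-table roots:

#include <stdio.h>

/* Stand-ins for the two page-table roots used by the EFI bracket. */
static const char *initial_page_table = "low identity mappings";
static const char *swapper_pg_dir     = "normal kernel mappings";
static const char *current_cr3;

static void efi_call_phys_prelog(void)
{
	/* interrupts would be saved and disabled here (local_irq_save) */
	current_cr3 = initial_page_table;   /* load_cr3(initial_page_table) */
	printf("prelog:  cr3 -> %s\n", current_cr3);
}

static void efi_call_phys_epilog(void)
{
	current_cr3 = swapper_pg_dir;       /* load_cr3(swapper_pg_dir) */
	printf("epilog:  cr3 -> %s\n", current_cr3);
	/* interrupts restored here (local_irq_restore) */
}

int main(void)
{
	efi_call_phys_prelog();
	/* the physical-mode EFI service would run here via the 1:1 mapping */
	efi_call_phys_epilog();
	return 0;
}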
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 6ed7afdaf4af..ad4ec1cb097e 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -76,6 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
76EXPORT_SYMBOL_GPL(sfi_mrtc_array); 76EXPORT_SYMBOL_GPL(sfi_mrtc_array);
77int sfi_mrtc_num; 77int sfi_mrtc_num;
78 78
79static void mrst_power_off(void)
80{
81 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
82 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
83}
84
85static void mrst_reboot(void)
86{
87 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
88 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
89 else
90 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
91}
92
79/* parse all the mtimer info to a static mtimer array */ 93/* parse all the mtimer info to a static mtimer array */
80static int __init sfi_parse_mtmr(struct sfi_table_header *table) 94static int __init sfi_parse_mtmr(struct sfi_table_header *table)
81{ 95{
@@ -187,11 +201,34 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
187static unsigned long __init mrst_calibrate_tsc(void) 201static unsigned long __init mrst_calibrate_tsc(void)
188{ 202{
189 unsigned long flags, fast_calibrate; 203 unsigned long flags, fast_calibrate;
190 204 if (__mrst_cpu_chip == MRST_CPU_CHIP_PENWELL) {
191 local_irq_save(flags); 205 u32 lo, hi, ratio, fsb;
192 fast_calibrate = apbt_quick_calibrate(); 206
193 local_irq_restore(flags); 207 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
194 208 pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
209 ratio = (hi >> 8) & 0x1f;
210 pr_debug("ratio is %d\n", ratio);
211 if (!ratio) {
212 pr_err("read a zero ratio, should be incorrect!\n");
213 pr_err("force tsc ratio to 16 ...\n");
214 ratio = 16;
215 }
216 rdmsr(MSR_FSB_FREQ, lo, hi);
217 if ((lo & 0x7) == 0x7)
218 fsb = PENWELL_FSB_FREQ_83SKU;
219 else
220 fsb = PENWELL_FSB_FREQ_100SKU;
221 fast_calibrate = ratio * fsb;
222 pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
223 lapic_timer_frequency = fsb * 1000 / HZ;
224 /* mark tsc clocksource as reliable */
225 set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
226 } else {
227 local_irq_save(flags);
228 fast_calibrate = apbt_quick_calibrate();
229 local_irq_restore(flags);
230 }
231
195 if (fast_calibrate) 232 if (fast_calibrate)
196 return fast_calibrate; 233 return fast_calibrate;
197 234
@@ -242,15 +279,15 @@ static int mrst_i8042_detect(void)
242 return 0; 279 return 0;
243} 280}
244 281
245/* Reboot and power off are handled by the SCU on a MID device */ 282/*
246static void mrst_power_off(void) 283 * Moorestown does not have external NMI source nor port 0x61 to report
247{ 284 * NMI status. The possible NMI sources are from pmu as a result of NMI
248 intel_scu_ipc_simple_command(0xf1, 1); 285 * watchdog or lock debug. Reading io port 0x61 results in 0xff which
249} 286 * misleads the NMI handler.
250 287 */
251static void mrst_reboot(void) 288static unsigned char mrst_get_nmi_reason(void)
252{ 289{
253 intel_scu_ipc_simple_command(0xf1, 0); 290 return 0;
254} 291}
255 292
256/* 293/*
@@ -274,6 +311,8 @@ void __init x86_mrst_early_setup(void)
274 x86_platform.calibrate_tsc = mrst_calibrate_tsc; 311 x86_platform.calibrate_tsc = mrst_calibrate_tsc;
275 x86_platform.i8042_detect = mrst_i8042_detect; 312 x86_platform.i8042_detect = mrst_i8042_detect;
276 x86_init.timers.wallclock_init = mrst_rtc_init; 313 x86_init.timers.wallclock_init = mrst_rtc_init;
314 x86_platform.get_nmi_reason = mrst_get_nmi_reason;
315
277 x86_init.pci.init = pci_mrst_init; 316 x86_init.pci.init = pci_mrst_init;
278 x86_init.pci.fixup_irqs = x86_init_noop; 317 x86_init.pci.fixup_irqs = x86_init_noop;
279 318
@@ -448,6 +487,46 @@ static void __init *max7315_platform_data(void *info)
448 return max7315; 487 return max7315;
449} 488}
450 489
490static void *tca6416_platform_data(void *info)
491{
492 static struct pca953x_platform_data tca6416;
493 struct i2c_board_info *i2c_info = info;
494 int gpio_base, intr;
495 char base_pin_name[SFI_NAME_LEN + 1];
496 char intr_pin_name[SFI_NAME_LEN + 1];
497
498 strcpy(i2c_info->type, "tca6416");
499 strcpy(base_pin_name, "tca6416_base");
500 strcpy(intr_pin_name, "tca6416_int");
501
502 gpio_base = get_gpio_by_name(base_pin_name);
503 intr = get_gpio_by_name(intr_pin_name);
504
505 if (gpio_base == -1)
506 return NULL;
507 tca6416.gpio_base = gpio_base;
508 if (intr != -1) {
509 i2c_info->irq = intr + MRST_IRQ_OFFSET;
510 tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET;
511 } else {
512 i2c_info->irq = -1;
513 tca6416.irq_base = -1;
514 }
515 return &tca6416;
516}
517
518static void *mpu3050_platform_data(void *info)
519{
520 struct i2c_board_info *i2c_info = info;
521 int intr = get_gpio_by_name("mpu3050_int");
522
523 if (intr == -1)
524 return NULL;
525
526 i2c_info->irq = intr + MRST_IRQ_OFFSET;
527 return NULL;
528}
529
451static void __init *emc1403_platform_data(void *info) 530static void __init *emc1403_platform_data(void *info)
452{ 531{
453 static short intr2nd_pdata; 532 static short intr2nd_pdata;
@@ -608,13 +687,17 @@ static void *msic_ocd_platform_data(void *info)
608} 687}
609 688
610static const struct devs_id __initconst device_ids[] = { 689static const struct devs_id __initconst device_ids[] = {
690 {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
611 {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data}, 691 {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
692 {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data},
612 {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data}, 693 {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
613 {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data}, 694 {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
614 {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data}, 695 {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
696 {"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data},
615 {"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data}, 697 {"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
616 {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data}, 698 {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
617 {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data}, 699 {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
700 {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data},
618 701
619 /* MSIC subdevices */ 702 /* MSIC subdevices */
620 {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data}, 703 {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data},
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
index a8ac6f1eb66d..225bd0f0f675 100644
--- a/arch/x86/platform/mrst/vrtc.c
+++ b/arch/x86/platform/mrst/vrtc.c
@@ -76,8 +76,8 @@ unsigned long vrtc_get_time(void)
76 76
77 spin_unlock_irqrestore(&rtc_lock, flags); 77 spin_unlock_irqrestore(&rtc_lock, flags);
78 78
79 /* vRTC YEAR reg contains the offset to 1960 */ 79 /* vRTC YEAR reg contains the offset to 1972 */
80 year += 1960; 80 year += 1972;
81 81
82 printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d " 82 printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d "
83 "mon: %d year: %d\n", sec, min, hour, mday, mon, year); 83 "mon: %d year: %d\n", sec, min, hour, mday, mon, year);
diff --git a/arch/x86/um/asm/processor.h b/arch/x86/um/asm/processor.h
index 118c143a9cb4..2c32df6fe231 100644
--- a/arch/x86/um/asm/processor.h
+++ b/arch/x86/um/asm/processor.h
@@ -11,7 +11,7 @@
11#endif 11#endif
12 12
13#define KSTK_EIP(tsk) KSTK_REG(tsk, HOST_IP) 13#define KSTK_EIP(tsk) KSTK_REG(tsk, HOST_IP)
14#define KSTK_ESP(tsk) KSTK_REG(tsk, HOST_IP) 14#define KSTK_ESP(tsk) KSTK_REG(tsk, HOST_SP)
15#define KSTK_EBP(tsk) KSTK_REG(tsk, HOST_BP) 15#define KSTK_EBP(tsk) KSTK_REG(tsk, HOST_BP)
16 16
17#define ARCH_IS_STACKGROW(address) \ 17#define ARCH_IS_STACKGROW(address) \
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index da8afd576a6b..1f928659c338 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1356,7 +1356,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
1356 int cpu = (long)hcpu; 1356 int cpu = (long)hcpu;
1357 switch (action) { 1357 switch (action) {
1358 case CPU_UP_PREPARE: 1358 case CPU_UP_PREPARE:
1359 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 1359 xen_vcpu_setup(cpu);
1360 if (xen_have_vector_callback) 1360 if (xen_have_vector_callback)
1361 xen_init_lock_cpu(cpu); 1361 xen_init_lock_cpu(cpu);
1362 break; 1362 break;
@@ -1386,7 +1386,6 @@ static void __init xen_hvm_guest_init(void)
1386 xen_hvm_smp_init(); 1386 xen_hvm_smp_init();
1387 register_cpu_notifier(&xen_hvm_cpu_notifier); 1387 register_cpu_notifier(&xen_hvm_cpu_notifier);
1388 xen_unplug_emulated_devices(); 1388 xen_unplug_emulated_devices();
1389 have_vcpu_info_placement = 0;
1390 x86_init.irqs.intr_init = xen_init_IRQ; 1389 x86_init.irqs.intr_init = xen_init_IRQ;
1391 xen_hvm_init_time_ops(); 1390 xen_hvm_init_time_ops();
1392 xen_hvm_init_mmu_ops(); 1391 xen_hvm_init_mmu_ops();
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index 6bbfd7ac5e81..5a40d24ba331 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -71,7 +71,7 @@ int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
71 71
72 if (shared == NULL) { 72 if (shared == NULL) {
73 struct vm_struct *area = 73 struct vm_struct *area =
74 alloc_vm_area(PAGE_SIZE * max_nr_gframes); 74 alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
75 BUG_ON(area == NULL); 75 BUG_ON(area == NULL);
76 shared = area->addr; 76 shared = area->addr;
77 *__shared = shared; 77 *__shared = shared;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 38d0af4fefec..b2c7179fa263 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -173,9 +173,21 @@ static unsigned long __init xen_get_max_pages(void)
173 domid_t domid = DOMID_SELF; 173 domid_t domid = DOMID_SELF;
174 int ret; 174 int ret;
175 175
176 ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid); 176 /*
177 if (ret > 0) 177 * For the initial domain we use the maximum reservation as
178 max_pages = ret; 178 * the maximum page.
179 *
180 * For guest domains the current maximum reservation reflects
181 * the current maximum rather than the static maximum. In this
182 * case the e820 map provided to us will cover the static
183 * maximum region.
184 */
185 if (xen_initial_domain()) {
186 ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
187 if (ret > 0)
188 max_pages = ret;
189 }
190
179 return min(max_pages, MAX_DOMAIN_PAGES); 191 return min(max_pages, MAX_DOMAIN_PAGES);
180} 192}
181 193
@@ -410,6 +422,6 @@ void __init xen_arch_setup(void)
410#endif 422#endif
411 disable_cpuidle(); 423 disable_cpuidle();
412 boot_option_idle_override = IDLE_HALT; 424 boot_option_idle_override = IDLE_HALT;
413 425 WARN_ON(set_pm_idle_to_default());
414 fiddle_vdso(); 426 fiddle_vdso();
415} 427}
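The xen_get_max_pages comment draws the key distinction: only the initial domain should trust XENMEM_maximum_reservation, while guest domains keep the static maximum described by their e820 map, and everything is clamped to MAX_DOMAIN_PAGES. A small sketch of that clamp logic; the hypercall stand-in, the domain test and the cap value are all hypothetical.

#include <stdio.h>

#define MAX_DOMAIN_PAGES (8UL * 1024 * 1024 * 1024 / 4096)  /* illustrative cap */

/* Hypothetical stand-ins for the hypercall and the domain test. */
static int is_initial_domain = 1;
static long maximum_reservation(void) { return 262144; }    /* pretend 1 GiB */

static unsigned long get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;

	/* Only dom0's current reservation reflects the true static maximum. */
	if (is_initial_domain) {
		long ret = maximum_reservation();
		if (ret > 0)
			max_pages = (unsigned long)ret;
	}
	return max_pages < MAX_DOMAIN_PAGES ? max_pages : MAX_DOMAIN_PAGES;
}

int main(void)
{
	printf("max_pages = %lu\n", get_max_pages());
	return 0;
}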
diff --git a/block/blk-core.c b/block/blk-core.c
index f43c8a5840ae..15de223c7f93 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -366,7 +366,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
366 if (drain_all) 366 if (drain_all)
367 blk_throtl_drain(q); 367 blk_throtl_drain(q);
368 368
369 __blk_run_queue(q); 369 /*
370 * This function might be called on a queue which failed
371 * driver init after queue creation. Some drivers
372 * (e.g. fd) get unhappy in such cases. Kick queue iff
373 * dispatch queue has something on it.
374 */
375 if (!list_empty(&q->queue_head))
376 __blk_run_queue(q);
370 377
371 if (drain_all) 378 if (drain_all)
372 nr_rqs = q->rq.count[0] + q->rq.count[1]; 379 nr_rqs = q->rq.count[0] + q->rq.count[1];
@@ -467,6 +474,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
467 q->backing_dev_info.state = 0; 474 q->backing_dev_info.state = 0;
468 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 475 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
469 q->backing_dev_info.name = "block"; 476 q->backing_dev_info.name = "block";
477 q->node = node_id;
470 478
471 err = bdi_init(&q->backing_dev_info); 479 err = bdi_init(&q->backing_dev_info);
472 if (err) { 480 if (err) {
@@ -551,7 +559,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
551 if (!uninit_q) 559 if (!uninit_q)
552 return NULL; 560 return NULL;
553 561
554 q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id); 562 q = blk_init_allocated_queue(uninit_q, rfn, lock);
555 if (!q) 563 if (!q)
556 blk_cleanup_queue(uninit_q); 564 blk_cleanup_queue(uninit_q);
557 565
@@ -563,18 +571,9 @@ struct request_queue *
563blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, 571blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
564 spinlock_t *lock) 572 spinlock_t *lock)
565{ 573{
566 return blk_init_allocated_queue_node(q, rfn, lock, -1);
567}
568EXPORT_SYMBOL(blk_init_allocated_queue);
569
570struct request_queue *
571blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
572 spinlock_t *lock, int node_id)
573{
574 if (!q) 574 if (!q)
575 return NULL; 575 return NULL;
576 576
577 q->node = node_id;
578 if (blk_init_free_list(q)) 577 if (blk_init_free_list(q))
579 return NULL; 578 return NULL;
580 579
@@ -604,7 +603,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
604 603
605 return NULL; 604 return NULL;
606} 605}
607EXPORT_SYMBOL(blk_init_allocated_queue_node); 606EXPORT_SYMBOL(blk_init_allocated_queue);
608 607
609int blk_get_queue(struct request_queue *q) 608int blk_get_queue(struct request_queue *q)
610{ 609{
@@ -1379,15 +1378,19 @@ get_rq:
1379 */ 1378 */
1380 if (list_empty(&plug->list)) 1379 if (list_empty(&plug->list))
1381 trace_block_plug(q); 1380 trace_block_plug(q);
1382 else if (!plug->should_sort) { 1381 else {
1383 struct request *__rq; 1382 if (!plug->should_sort) {
1383 struct request *__rq;
1384 1384
1385 __rq = list_entry_rq(plug->list.prev); 1385 __rq = list_entry_rq(plug->list.prev);
1386 if (__rq->q != q) 1386 if (__rq->q != q)
1387 plug->should_sort = 1; 1387 plug->should_sort = 1;
1388 }
1389 if (request_count >= BLK_MAX_REQUEST_COUNT) {
1390 blk_flush_plug_list(plug, false);
1391 trace_block_plug(q);
1392 }
1388 } 1393 }
1389 if (request_count >= BLK_MAX_REQUEST_COUNT)
1390 blk_flush_plug_list(plug, false);
1391 list_add_tail(&req->queuelist, &plug->list); 1394 list_add_tail(&req->queuelist, &plug->list);
1392 drive_stat_acct(req, 1); 1395 drive_stat_acct(req, 1);
1393 } else { 1396 } else {
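The reworked plug path in blk-core.c keeps the two checks together: mark the plug for sorting when the list already holds a request for a different queue, and flush the plug once it reaches BLK_MAX_REQUEST_COUNT so one task cannot hoard too many requests. A self-contained sketch of that bounded plug list; the struct layout and helpers are illustrative.

#include <stdio.h>

#define BLK_MAX_REQUEST_COUNT 16

struct request { int queue_id; };

struct plug {
	struct request reqs[BLK_MAX_REQUEST_COUNT];
	unsigned int count;
	int should_sort;
};

static void flush_plug(struct plug *p)
{
	printf("flushing %u plugged requests (sort=%d)\n", p->count, p->should_sort);
	p->count = 0;
	p->should_sort = 0;
}

static void plug_request(struct plug *p, struct request rq)
{
	if (p->count) {
		/* Requests for different queues must be sorted at flush time. */
		if (p->reqs[p->count - 1].queue_id != rq.queue_id)
			p->should_sort = 1;
		/* Cap how much a single task can plug before submitting. */
		if (p->count >= BLK_MAX_REQUEST_COUNT)
			flush_plug(p);
	}
	p->reqs[p->count++] = rq;
}

int main(void)
{
	struct plug p = { .count = 0 };

	for (int i = 0; i < 40; i++)
		plug_request(&p, (struct request){ .queue_id = i % 2 });
	flush_plug(&p);
	return 0;
}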
diff --git a/block/blk-map.c b/block/blk-map.c
index e663ac2d8e68..164cd0059706 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -204,10 +204,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
204 if (!iov[i].iov_len) 204 if (!iov[i].iov_len)
205 return -EINVAL; 205 return -EINVAL;
206 206
207 if (uaddr & queue_dma_alignment(q)) { 207 /*
208 * Keep going so we check length of all segments
209 */
210 if (uaddr & queue_dma_alignment(q))
208 unaligned = 1; 211 unaligned = 1;
209 break;
210 }
211 } 212 }
212 213
213 if (unaligned || (q->dma_pad_mask & len) || map_data) 214 if (unaligned || (q->dma_pad_mask & len) || map_data)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 16ace89613bc..4c12869fcf77 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3184,7 +3184,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
3184 } 3184 }
3185 } 3185 }
3186 3186
3187 if (ret) 3187 if (ret && ret != -EEXIST)
3188 printk(KERN_ERR "cfq: cic link failed!\n"); 3188 printk(KERN_ERR "cfq: cic link failed!\n");
3189 3189
3190 return ret; 3190 return ret;
@@ -3200,6 +3200,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
3200{ 3200{
3201 struct io_context *ioc = NULL; 3201 struct io_context *ioc = NULL;
3202 struct cfq_io_context *cic; 3202 struct cfq_io_context *cic;
3203 int ret;
3203 3204
3204 might_sleep_if(gfp_mask & __GFP_WAIT); 3205 might_sleep_if(gfp_mask & __GFP_WAIT);
3205 3206
@@ -3207,6 +3208,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
3207 if (!ioc) 3208 if (!ioc)
3208 return NULL; 3209 return NULL;
3209 3210
3211retry:
3210 cic = cfq_cic_lookup(cfqd, ioc); 3212 cic = cfq_cic_lookup(cfqd, ioc);
3211 if (cic) 3213 if (cic)
3212 goto out; 3214 goto out;
@@ -3215,7 +3217,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
3215 if (cic == NULL) 3217 if (cic == NULL)
3216 goto err; 3218 goto err;
3217 3219
3218 if (cfq_cic_link(cfqd, ioc, cic, gfp_mask)) 3220 ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
3221 if (ret == -EEXIST) {
3222 /* someone has linked cic to ioc already */
3223 cfq_cic_free(cic);
3224 goto retry;
3225 } else if (ret)
3219 goto err_free; 3226 goto err_free;
3220 3227
3221out: 3228out:
@@ -4036,6 +4043,11 @@ static void *cfq_init_queue(struct request_queue *q)
4036 4043
4037 if (blkio_alloc_blkg_stats(&cfqg->blkg)) { 4044 if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
4038 kfree(cfqg); 4045 kfree(cfqg);
4046
4047 spin_lock(&cic_index_lock);
4048 ida_remove(&cic_index_ida, cfqd->cic_index);
4049 spin_unlock(&cic_index_lock);
4050
4039 kfree(cfqd); 4051 kfree(cfqd);
4040 return NULL; 4052 return NULL;
4041 } 4053 }
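The cfq change above turns a racy failure into a retry: if cfq_cic_link returns -EEXIST because another task linked a cic for the same io_context first, the freshly allocated cic is freed and the lookup repeated, so both tasks end up sharing the winner's object. A generic sketch of that optimistic insert-or-retry pattern; the toy table and names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct cic { int key; };

static struct cic *table[8];                   /* toy stand-in for the radix tree */

static struct cic *lookup(int key)
{
	return table[key % 8];
}

/* Fails with -EEXIST if someone else inserted the same key first. */
static int link_cic(int key, struct cic *c)
{
	if (table[key % 8])
		return -EEXIST;
	table[key % 8] = c;
	return 0;
}

static struct cic *get_cic(int key)
{
	struct cic *c;
	int ret;

retry:
	c = lookup(key);
	if (c)
		return c;                      /* fast path: already linked */

	c = malloc(sizeof(*c));
	if (!c)
		return NULL;
	c->key = key;

	ret = link_cic(key, c);
	if (ret == -EEXIST) {                  /* lost the race: use theirs */
		free(c);
		goto retry;
	} else if (ret) {
		free(c);
		return NULL;
	}
	return c;
}

int main(void)
{
	struct cic *a = get_cic(3), *b = get_cic(3);

	printf("same object: %s\n", a == b ? "yes" : "no");
	return 0;
}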
diff --git a/block/genhd.c b/block/genhd.c
index 9253839714ff..02e9fca80825 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -19,7 +19,6 @@
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20#include <linux/idr.h> 20#include <linux/idr.h>
21#include <linux/log2.h> 21#include <linux/log2.h>
22#include <linux/ctype.h>
23 22
24#include "blk.h" 23#include "blk.h"
25 24
@@ -916,74 +915,6 @@ static int __init genhd_device_init(void)
916 915
917subsys_initcall(genhd_device_init); 916subsys_initcall(genhd_device_init);
918 917
919static ssize_t alias_show(struct device *dev,
920 struct device_attribute *attr, char *buf)
921{
922 struct gendisk *disk = dev_to_disk(dev);
923 ssize_t ret = 0;
924
925 if (disk->alias)
926 ret = snprintf(buf, ALIAS_LEN, "%s\n", disk->alias);
927 return ret;
928}
929
930static ssize_t alias_store(struct device *dev, struct device_attribute *attr,
931 const char *buf, size_t count)
932{
933 struct gendisk *disk = dev_to_disk(dev);
934 char *alias;
935 char *envp[] = { NULL, NULL };
936 unsigned char c;
937 int i;
938 ssize_t ret = count;
939
940 if (!count)
941 return -EINVAL;
942
943 if (count >= ALIAS_LEN) {
944 printk(KERN_ERR "alias: alias is too long\n");
945 return -EINVAL;
946 }
947
948 /* Validation check */
949 for (i = 0; i < count; i++) {
950 c = buf[i];
951 if (i == count - 1 && c == '\n')
952 break;
953 if (!isalnum(c) && c != '_' && c != '-') {
954 printk(KERN_ERR "alias: invalid alias\n");
955 return -EINVAL;
956 }
957 }
958
959 if (disk->alias) {
960 printk(KERN_INFO "alias: %s is already assigned (%s)\n",
961 disk->disk_name, disk->alias);
962 return -EINVAL;
963 }
964
965 alias = kasprintf(GFP_KERNEL, "%s", buf);
966 if (!alias)
967 return -ENOMEM;
968
969 if (alias[count - 1] == '\n')
970 alias[count - 1] = '\0';
971
972 envp[0] = kasprintf(GFP_KERNEL, "ALIAS=%s", alias);
973 if (!envp[0]) {
974 kfree(alias);
975 return -ENOMEM;
976 }
977
978 disk->alias = alias;
979 printk(KERN_INFO "alias: assigned %s to %s\n", alias, disk->disk_name);
980
981 kobject_uevent_env(&dev->kobj, KOBJ_ADD, envp);
982
983 kfree(envp[0]);
984 return ret;
985}
986
987static ssize_t disk_range_show(struct device *dev, 918static ssize_t disk_range_show(struct device *dev,
988 struct device_attribute *attr, char *buf) 919 struct device_attribute *attr, char *buf)
989{ 920{
@@ -1043,7 +974,6 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
1043 return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue)); 974 return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
1044} 975}
1045 976
1046static DEVICE_ATTR(alias, S_IRUGO|S_IWUSR, alias_show, alias_store);
1047static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); 977static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
1048static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL); 978static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
1049static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); 979static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
@@ -1066,7 +996,6 @@ static struct device_attribute dev_attr_fail_timeout =
1066#endif 996#endif
1067 997
1068static struct attribute *disk_attrs[] = { 998static struct attribute *disk_attrs[] = {
1069 &dev_attr_alias.attr,
1070 &dev_attr_range.attr, 999 &dev_attr_range.attr,
1071 &dev_attr_ext_range.attr, 1000 &dev_attr_ext_range.attr,
1072 &dev_attr_removable.attr, 1001 &dev_attr_removable.attr,
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index a816f24f2d52..a0f768c1d9aa 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -383,6 +383,7 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
383 return 0; 383 return 0;
384} 384}
385 385
386#ifdef CONFIG_NET
386static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 387static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
387{ 388{
388 struct crypto_report_blkcipher rblkcipher; 389 struct crypto_report_blkcipher rblkcipher;
@@ -404,6 +405,12 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
404nla_put_failure: 405nla_put_failure:
405 return -EMSGSIZE; 406 return -EMSGSIZE;
406} 407}
408#else
409static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
410{
411 return -ENOSYS;
412}
413#endif
407 414
408static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) 415static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
409 __attribute__ ((unused)); 416 __attribute__ ((unused));
@@ -457,6 +464,7 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
457 return 0; 464 return 0;
458} 465}
459 466
467#ifdef CONFIG_NET
460static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 468static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
461{ 469{
462 struct crypto_report_blkcipher rblkcipher; 470 struct crypto_report_blkcipher rblkcipher;
@@ -478,6 +486,12 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
478nla_put_failure: 486nla_put_failure:
479 return -EMSGSIZE; 487 return -EMSGSIZE;
480} 488}
489#else
490static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
491{
492 return -ENOSYS;
493}
494#endif
481 495
482static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg) 496static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
483 __attribute__ ((unused)); 497 __attribute__ ((unused));
diff --git a/crypto/aead.c b/crypto/aead.c
index 701556ffaaef..04add3dca6fe 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -111,6 +111,7 @@ static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
111 return 0; 111 return 0;
112} 112}
113 113
114#ifdef CONFIG_NET
114static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) 115static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
115{ 116{
116 struct crypto_report_aead raead; 117 struct crypto_report_aead raead;
@@ -132,6 +133,12 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
132nla_put_failure: 133nla_put_failure:
133 return -EMSGSIZE; 134 return -EMSGSIZE;
134} 135}
136#else
137static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
138{
139 return -ENOSYS;
140}
141#endif
135 142
136static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) 143static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
137 __attribute__ ((unused)); 144 __attribute__ ((unused));
@@ -190,6 +197,7 @@ static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
190 return 0; 197 return 0;
191} 198}
192 199
200#ifdef CONFIG_NET
193static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg) 201static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
194{ 202{
195 struct crypto_report_aead raead; 203 struct crypto_report_aead raead;
@@ -210,6 +218,12 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
210nla_put_failure: 218nla_put_failure:
211 return -EMSGSIZE; 219 return -EMSGSIZE;
212} 220}
221#else
222static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
223{
224 return -ENOSYS;
225}
226#endif
213 227
214 228
215static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) 229static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
diff --git a/crypto/ahash.c b/crypto/ahash.c
index a3e6ef99394a..ac93c99cfae8 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -399,6 +399,7 @@ static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
399 return sizeof(struct crypto_shash *); 399 return sizeof(struct crypto_shash *);
400} 400}
401 401
402#ifdef CONFIG_NET
402static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) 403static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
403{ 404{
404 struct crypto_report_hash rhash; 405 struct crypto_report_hash rhash;
@@ -416,6 +417,12 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
416nla_put_failure: 417nla_put_failure:
417 return -EMSGSIZE; 418 return -EMSGSIZE;
418} 419}
420#else
421static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
422{
423 return -ENOSYS;
424}
425#endif
419 426
420static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) 427static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
421 __attribute__ ((unused)); 428 __attribute__ ((unused));
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 2572d2600136..1e61d1a888b2 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -494,6 +494,7 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
494 return crypto_init_blkcipher_ops_async(tfm); 494 return crypto_init_blkcipher_ops_async(tfm);
495} 495}
496 496
497#ifdef CONFIG_NET
497static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 498static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
498{ 499{
499 struct crypto_report_blkcipher rblkcipher; 500 struct crypto_report_blkcipher rblkcipher;
@@ -515,6 +516,12 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
515nla_put_failure: 516nla_put_failure:
516 return -EMSGSIZE; 517 return -EMSGSIZE;
517} 518}
519#else
520static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
521{
522 return -ENOSYS;
523}
524#endif
518 525
519static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) 526static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
520 __attribute__ ((unused)); 527 __attribute__ ((unused));
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 2abca780312d..0605a2bbba75 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -44,9 +44,6 @@ static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
44 44
45 down_read(&crypto_alg_sem); 45 down_read(&crypto_alg_sem);
46 46
47 if (list_empty(&crypto_alg_list))
48 return NULL;
49
50 list_for_each_entry(q, &crypto_alg_list, cra_list) { 47 list_for_each_entry(q, &crypto_alg_list, cra_list) {
51 int match = 0; 48 int match = 0;
52 49
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
index fefda78a6a2a..2e458e5482d0 100644
--- a/crypto/pcompress.c
+++ b/crypto/pcompress.c
@@ -48,6 +48,7 @@ static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
48 return 0; 48 return 0;
49} 49}
50 50
51#ifdef CONFIG_NET
51static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg) 52static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
52{ 53{
53 struct crypto_report_comp rpcomp; 54 struct crypto_report_comp rpcomp;
@@ -62,6 +63,12 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
62nla_put_failure: 63nla_put_failure:
63 return -EMSGSIZE; 64 return -EMSGSIZE;
64} 65}
66#else
67static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
68{
69 return -ENOSYS;
70}
71#endif
65 72
66static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg) 73static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
67 __attribute__ ((unused)); 74 __attribute__ ((unused));
diff --git a/crypto/rng.c b/crypto/rng.c
index feb7de00f437..64f864fa8043 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -60,6 +60,7 @@ static int crypto_init_rng_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
60 return 0; 60 return 0;
61} 61}
62 62
63#ifdef CONFIG_NET
63static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) 64static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
64{ 65{
65 struct crypto_report_rng rrng; 66 struct crypto_report_rng rrng;
@@ -76,6 +77,12 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
76nla_put_failure: 77nla_put_failure:
77 return -EMSGSIZE; 78 return -EMSGSIZE;
78} 79}
80#else
81static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
82{
83 return -ENOSYS;
84}
85#endif
79 86
80static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) 87static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
81 __attribute__ ((unused)); 88 __attribute__ ((unused));
diff --git a/crypto/shash.c b/crypto/shash.c
index ea8a9c6e21e3..9100912716ae 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -524,6 +524,7 @@ static unsigned int crypto_shash_extsize(struct crypto_alg *alg)
524 return alg->cra_ctxsize; 524 return alg->cra_ctxsize;
525} 525}
526 526
527#ifdef CONFIG_NET
527static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) 528static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
528{ 529{
529 struct crypto_report_hash rhash; 530 struct crypto_report_hash rhash;
@@ -541,6 +542,12 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
541nla_put_failure: 542nla_put_failure:
542 return -EMSGSIZE; 543 return -EMSGSIZE;
543} 544}
545#else
546static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
547{
548 return -ENOSYS;
549}
550#endif
544 551
545static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) 552static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
546 __attribute__ ((unused)); 553 __attribute__ ((unused));
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 127408069ca7..631b9477b99c 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -932,7 +932,8 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
932static int erst_open_pstore(struct pstore_info *psi); 932static int erst_open_pstore(struct pstore_info *psi);
933static int erst_close_pstore(struct pstore_info *psi); 933static int erst_close_pstore(struct pstore_info *psi);
934static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, 934static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
935 struct timespec *time, struct pstore_info *psi); 935 struct timespec *time, char **buf,
936 struct pstore_info *psi);
936static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part, 937static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
937 size_t size, struct pstore_info *psi); 938 size_t size, struct pstore_info *psi);
938static int erst_clearer(enum pstore_type_id type, u64 id, 939static int erst_clearer(enum pstore_type_id type, u64 id,
@@ -986,17 +987,23 @@ static int erst_close_pstore(struct pstore_info *psi)
986} 987}
987 988
988static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, 989static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
989 struct timespec *time, struct pstore_info *psi) 990 struct timespec *time, char **buf,
991 struct pstore_info *psi)
990{ 992{
991 int rc; 993 int rc;
992 ssize_t len = 0; 994 ssize_t len = 0;
993 u64 record_id; 995 u64 record_id;
994 struct cper_pstore_record *rcd = (struct cper_pstore_record *) 996 struct cper_pstore_record *rcd;
995 (erst_info.buf - sizeof(*rcd)); 997 size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;
996 998
997 if (erst_disable) 999 if (erst_disable)
998 return -ENODEV; 1000 return -ENODEV;
999 1001
1002 rcd = kmalloc(rcd_len, GFP_KERNEL);
1003 if (!rcd) {
1004 rc = -ENOMEM;
1005 goto out;
1006 }
1000skip: 1007skip:
1001 rc = erst_get_record_id_next(&reader_pos, &record_id); 1008 rc = erst_get_record_id_next(&reader_pos, &record_id);
1002 if (rc) 1009 if (rc)
@@ -1004,22 +1011,27 @@ skip:
1004 1011
1005 /* no more record */ 1012 /* no more record */
1006 if (record_id == APEI_ERST_INVALID_RECORD_ID) { 1013 if (record_id == APEI_ERST_INVALID_RECORD_ID) {
1007 rc = -1; 1014 rc = -EINVAL;
1008 goto out; 1015 goto out;
1009 } 1016 }
1010 1017
1011 len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) + 1018 len = erst_read(record_id, &rcd->hdr, rcd_len);
1012 erst_info.bufsize);
1013 /* The record may be cleared by others, try read next record */ 1019 /* The record may be cleared by others, try read next record */
1014 if (len == -ENOENT) 1020 if (len == -ENOENT)
1015 goto skip; 1021 goto skip;
1016 else if (len < 0) { 1022 else if (len < sizeof(*rcd)) {
1017 rc = -1; 1023 rc = -EIO;
1018 goto out; 1024 goto out;
1019 } 1025 }
1020 if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0) 1026 if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
1021 goto skip; 1027 goto skip;
1022 1028
1029 *buf = kmalloc(len, GFP_KERNEL);
1030 if (*buf == NULL) {
1031 rc = -ENOMEM;
1032 goto out;
1033 }
1034 memcpy(*buf, rcd->data, len - sizeof(*rcd));
1023 *id = record_id; 1035 *id = record_id;
1024 if (uuid_le_cmp(rcd->sec_hdr.section_type, 1036 if (uuid_le_cmp(rcd->sec_hdr.section_type,
1025 CPER_SECTION_TYPE_DMESG) == 0) 1037 CPER_SECTION_TYPE_DMESG) == 0)
@@ -1037,6 +1049,7 @@ skip:
1037 time->tv_nsec = 0; 1049 time->tv_nsec = 0;
1038 1050
1039out: 1051out:
1052 kfree(rcd);
1040 return (rc < 0) ? rc : (len - sizeof(*rcd)); 1053 return (rc < 0) ? rc : (len - sizeof(*rcd));
1041} 1054}
1042 1055
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 73b2909dddfe..0e8e2de2ed3e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -224,7 +224,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
224/* 224/*
225 * Suspend / resume control 225 * Suspend / resume control
226 */ 226 */
227static int acpi_idle_suspend;
228static u32 saved_bm_rld; 227static u32 saved_bm_rld;
229 228
230static void acpi_idle_bm_rld_save(void) 229static void acpi_idle_bm_rld_save(void)
@@ -243,21 +242,13 @@ static void acpi_idle_bm_rld_restore(void)
243 242
244int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) 243int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
245{ 244{
246 if (acpi_idle_suspend == 1)
247 return 0;
248
249 acpi_idle_bm_rld_save(); 245 acpi_idle_bm_rld_save();
250 acpi_idle_suspend = 1;
251 return 0; 246 return 0;
252} 247}
253 248
254int acpi_processor_resume(struct acpi_device * device) 249int acpi_processor_resume(struct acpi_device * device)
255{ 250{
256 if (acpi_idle_suspend == 0)
257 return 0;
258
259 acpi_idle_bm_rld_restore(); 251 acpi_idle_bm_rld_restore();
260 acpi_idle_suspend = 0;
261 return 0; 252 return 0;
262} 253}
263 254
@@ -763,13 +754,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
763 754
764 local_irq_disable(); 755 local_irq_disable();
765 756
766 /* Do not access any ACPI IO ports in suspend path */
767 if (acpi_idle_suspend) {
768 local_irq_enable();
769 cpu_relax();
770 return -EINVAL;
771 }
772
773 lapic_timer_state_broadcast(pr, cx, 1); 757 lapic_timer_state_broadcast(pr, cx, 1);
774 kt1 = ktime_get_real(); 758 kt1 = ktime_get_real();
775 acpi_idle_do_entry(cx); 759 acpi_idle_do_entry(cx);
@@ -810,13 +794,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
810 794
811 local_irq_disable(); 795 local_irq_disable();
812 796
813 if (acpi_idle_suspend) {
814 local_irq_enable();
815 cpu_relax();
816 return -EINVAL;
817 }
818
819
820 if (cx->entry_method != ACPI_CSTATE_FFH) { 797 if (cx->entry_method != ACPI_CSTATE_FFH) {
821 current_thread_info()->status &= ~TS_POLLING; 798 current_thread_info()->status &= ~TS_POLLING;
822 /* 799 /*
@@ -895,12 +872,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
895 if (unlikely(!pr)) 872 if (unlikely(!pr))
896 return -EINVAL; 873 return -EINVAL;
897 874
898
899 if (acpi_idle_suspend) {
900 cpu_relax();
901 return -EINVAL;
902 }
903
904 if (!cx->bm_sts_skip && acpi_idle_bm_check()) { 875 if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
905 if (drv->safe_state_index >= 0) { 876 if (drv->safe_state_index >= 0) {
906 return drv->states[drv->safe_state_index].enter(dev, 877 return drv->states[drv->safe_state_index].enter(dev,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index fb7b90b05922..cf26222a93c5 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -390,6 +390,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
390 /* Promise */ 390 /* Promise */
391 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ 391 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
392 392
393 /* Asmedia */
394 { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1061 */
395
393 /* Generic, PCI class code for AHCI */ 396 /* Generic, PCI class code for AHCI */
394 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 397 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
395 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, 398 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 004f2ce3dc73..43b875810d1b 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -65,9 +65,9 @@ static struct scsi_host_template ahci_platform_sht = {
65static int __init ahci_probe(struct platform_device *pdev) 65static int __init ahci_probe(struct platform_device *pdev)
66{ 66{
67 struct device *dev = &pdev->dev; 67 struct device *dev = &pdev->dev;
68 struct ahci_platform_data *pdata = dev->platform_data; 68 struct ahci_platform_data *pdata = dev_get_platdata(dev);
69 const struct platform_device_id *id = platform_get_device_id(pdev); 69 const struct platform_device_id *id = platform_get_device_id(pdev);
70 struct ata_port_info pi = ahci_port_info[id->driver_data]; 70 struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
71 const struct ata_port_info *ppi[] = { &pi, NULL }; 71 const struct ata_port_info *ppi[] = { &pi, NULL };
72 struct ahci_host_priv *hpriv; 72 struct ahci_host_priv *hpriv;
73 struct ata_host *host; 73 struct ata_host *host;
@@ -191,7 +191,7 @@ err0:
191static int __devexit ahci_remove(struct platform_device *pdev) 191static int __devexit ahci_remove(struct platform_device *pdev)
192{ 192{
193 struct device *dev = &pdev->dev; 193 struct device *dev = &pdev->dev;
194 struct ahci_platform_data *pdata = dev->platform_data; 194 struct ahci_platform_data *pdata = dev_get_platdata(dev);
195 struct ata_host *host = dev_get_drvdata(dev); 195 struct ata_host *host = dev_get_drvdata(dev);
196 196
197 ata_host_detach(host); 197 ata_host_detach(host);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index f22957c2769a..a9b282038000 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2883,7 +2883,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2883 sata_scr_read(link, SCR_STATUS, &sstatus)) 2883 sata_scr_read(link, SCR_STATUS, &sstatus))
2884 rc = -ERESTART; 2884 rc = -ERESTART;
2885 2885
2886 if (rc == -ERESTART || try >= max_tries) { 2886 if (try >= max_tries) {
2887 /* 2887 /*
2888 * Thaw host port even if reset failed, so that the port 2888 * Thaw host port even if reset failed, so that the port
2889 * can be retried on the next phy event. This risks 2889 * can be retried on the next phy event. This risks
@@ -2909,6 +2909,16 @@ int ata_eh_reset(struct ata_link *link, int classify,
2909 ata_eh_acquire(ap); 2909 ata_eh_acquire(ap);
2910 } 2910 }
2911 2911
2912 /*
2913 * While disks spinup behind PMP, some controllers fail sending SRST.
2914 * They need to be reset - as well as the PMP - before retrying.
2915 */
2916 if (rc == -ERESTART) {
2917 if (ata_is_host_link(link))
2918 ata_eh_thaw_port(ap);
2919 goto out;
2920 }
2921
2912 if (try == max_tries - 1) { 2922 if (try == max_tries - 1) {
2913 sata_down_spd_limit(link, 0); 2923 sata_down_spd_limit(link, 0);
2914 if (slave) 2924 if (slave)
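The libata-eh change makes -ERESTART leave the retry loop immediately: when SRST fails while disks spin up behind a port multiplier, the host link is thawed and the whole reset, PMP included, is redone on the next phy event instead of burning through max_tries. A small sketch of that "retry unless the error says start over" control flow; the attempt counts and helper are invented for illustration.

#include <stdio.h>
#include <errno.h>

#define MAX_TRIES 5

/* Pretend reset: fails twice, then asks for a restart. */
static int do_reset(int attempt)
{
	if (attempt < 2)
		return -EIO;        /* ordinary failure: retry here     */
	if (attempt == 2)
		return -ERESTART;   /* needs a restart from the top     */
	return 0;
}

static int reset_link(void)
{
	for (int attempt = 0; attempt < MAX_TRIES; attempt++) {
		int rc = do_reset(attempt);

		if (rc == 0)
			return 0;
		if (rc == -ERESTART) {
			printf("thawing port, restarting EH from scratch\n");
			return rc;  /* bail out instead of retrying here */
		}
		printf("reset failed (%d), retrying\n", rc);
	}
	return -EIO;
}

int main(void)
{
	return reset_link() == -ERESTART ? 0 : 1;
}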
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 104462dbc524..21b80c555c60 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -389,12 +389,9 @@ static void sata_pmp_quirks(struct ata_port *ap)
389 /* link reports offline after LPM */ 389 /* link reports offline after LPM */
390 link->flags |= ATA_LFLAG_NO_LPM; 390 link->flags |= ATA_LFLAG_NO_LPM;
391 391
392 /* Class code report is unreliable and SRST 392 /* Class code report is unreliable. */
393 * times out under certain configurations.
394 */
395 if (link->pmp < 5) 393 if (link->pmp < 5)
396 link->flags |= ATA_LFLAG_NO_SRST | 394 link->flags |= ATA_LFLAG_ASSUME_ATA;
397 ATA_LFLAG_ASSUME_ATA;
398 395
399 /* port 5 is for SEMB device and it doesn't like SRST */ 396 /* port 5 is for SEMB device and it doesn't like SRST */
400 if (link->pmp == 5) 397 if (link->pmp == 5)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 72a9770ac42f..2a5412e7e9c1 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1217,6 +1217,10 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
1217 1217
1218/** 1218/**
1219 * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth 1219 * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
1220 * @ap: ATA port to which the device change the queue depth
1221 * @sdev: SCSI device to configure queue depth for
1222 * @queue_depth: new queue depth
1223 * @reason: calling context
1220 * 1224 *
1221 * libsas and libata have different approaches for associating a sdev to 1225 * libsas and libata have different approaches for associating a sdev to
1222 * its ata_port. 1226 * its ata_port.
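
Editor's note: the libata-scsi hunk only adds missing kernel-doc parameter lines. For reference, the comment shape those lines follow is sketched below with a made-up function; only the layout (summary line, @param lines, blank line, body, Return:) reflects the kernel-doc convention.

#include <stdio.h>

/**
 * example_set_depth - one-line summary of the function
 * @dev:   device being configured
 * @depth: requested queue depth
 *
 * Longer description follows after a blank kernel-doc line.
 *
 * Return: 0 on success, negative value on failure.
 */
static int example_set_depth(void *dev, int depth)
{
	(void)dev;
	return depth > 0 ? 0 : -1;
}

int main(void)
{
	printf("%d\n", example_set_depth(NULL, 4));
	return 0;
}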
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 63d53277d6a9..4cadfa28f940 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2533,10 +2533,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
2533 if (rc) 2533 if (rc)
2534 goto out; 2534 goto out;
2535 2535
2536#ifdef CONFIG_ATA_BMDMA
2536 if (bmdma) 2537 if (bmdma)
2537 /* prepare and activate BMDMA host */ 2538 /* prepare and activate BMDMA host */
2538 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); 2539 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2539 else 2540 else
2541#endif
2540 /* prepare and activate SFF host */ 2542 /* prepare and activate SFF host */
2541 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 2543 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2542 if (rc) 2544 if (rc)
@@ -2544,10 +2546,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
2544 host->private_data = host_priv; 2546 host->private_data = host_priv;
2545 host->flags |= hflags; 2547 host->flags |= hflags;
2546 2548
2549#ifdef CONFIG_ATA_BMDMA
2547 if (bmdma) { 2550 if (bmdma) {
2548 pci_set_master(pdev); 2551 pci_set_master(pdev);
2549 rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); 2552 rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
2550 } else 2553 } else
2554#endif
2551 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); 2555 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2552out: 2556out:
2553 if (rc == 0) 2557 if (rc == 0)
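
Editor's note: the libata-sff hunks wrap the BMDMA branches in #ifdef CONFIG_ATA_BMDMA and rely on the else keyword sitting inside the guarded region: when the option is off, only the SFF statement remains and it runs unconditionally. A small standalone illustration of the same dangling-else-inside-#ifdef trick; HAVE_FAST_PATH is an invented option name.

#include <stdio.h>

/* Toggle this to see both shapes of the generated code. */
#define HAVE_FAST_PATH 1

static void fast_path(void) { printf("fast path\n"); }
static void slow_path(void) { printf("slow path\n"); }

static void run(int want_fast)
{
	(void)want_fast;	/* unused when the fast path is compiled out */
#ifdef HAVE_FAST_PATH
	if (want_fast)
		fast_path();
	else
#endif
		/* With HAVE_FAST_PATH undefined, this line is unconditional. */
		slow_path();
}

int main(void)
{
	run(1);
	run(0);
	return 0;
}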
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index a72ab0dde4e5..2a472c5bb7db 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -52,7 +52,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
52 } 52 }
53 53
54 ret = of_irq_to_resource(dn, 0, &irq_res); 54 ret = of_irq_to_resource(dn, 0, &irq_res);
55 if (ret == NO_IRQ) 55 if (!ret)
56 irq_res.start = irq_res.end = 0; 56 irq_res.start = irq_res.end = 0;
57 else 57 else
58 irq_res.flags = 0; 58 irq_res.flags = 0;
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 447d9c05fb5a..95ec435f0eb4 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -104,7 +104,7 @@ static const struct ata_port_info sis_port_info = {
104}; 104};
105 105
106MODULE_AUTHOR("Uwe Koziolek"); 106MODULE_AUTHOR("Uwe Koziolek");
107MODULE_DESCRIPTION("low-level driver for Silicon Integratad Systems SATA controller"); 107MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller");
108MODULE_LICENSE("GPL"); 108MODULE_LICENSE("GPL");
109MODULE_DEVICE_TABLE(pci, sis_pci_tbl); 109MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
110MODULE_VERSION(DRV_VERSION); 110MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 82c865452c70..919daa7cd5b1 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -22,6 +22,7 @@
22#include <linux/kallsyms.h> 22#include <linux/kallsyms.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/async.h> 24#include <linux/async.h>
25#include <linux/pm_runtime.h>
25 26
26#include "base.h" 27#include "base.h"
27#include "power/power.h" 28#include "power/power.h"
@@ -1743,6 +1744,10 @@ void device_shutdown(void)
1743 list_del_init(&dev->kobj.entry); 1744 list_del_init(&dev->kobj.entry);
1744 spin_unlock(&devices_kset->list_lock); 1745 spin_unlock(&devices_kset->list_lock);
1745 1746
1747 /* Don't allow any more runtime suspends */
1748 pm_runtime_get_noresume(dev);
1749 pm_runtime_barrier(dev);
1750
1746 if (dev->bus && dev->bus->shutdown) { 1751 if (dev->bus && dev->bus->shutdown) {
1747 dev_dbg(dev, "shutdown\n"); 1752 dev_dbg(dev, "shutdown\n");
1748 dev->bus->shutdown(dev); 1753 dev->bus->shutdown(dev);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 793f796c4da3..5693ecee9a40 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -127,12 +127,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
127 nid, K(node_page_state(nid, NR_WRITEBACK)), 127 nid, K(node_page_state(nid, NR_WRITEBACK)),
128 nid, K(node_page_state(nid, NR_FILE_PAGES)), 128 nid, K(node_page_state(nid, NR_FILE_PAGES)),
129 nid, K(node_page_state(nid, NR_FILE_MAPPED)), 129 nid, K(node_page_state(nid, NR_FILE_MAPPED)),
130 nid, K(node_page_state(nid, NR_ANON_PAGES)
131#ifdef CONFIG_TRANSPARENT_HUGEPAGE 130#ifdef CONFIG_TRANSPARENT_HUGEPAGE
131 nid, K(node_page_state(nid, NR_ANON_PAGES)
132 + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * 132 + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
133 HPAGE_PMD_NR 133 HPAGE_PMD_NR),
134#else
135 nid, K(node_page_state(nid, NR_ANON_PAGES)),
134#endif 136#endif
135 ),
136 nid, K(node_page_state(nid, NR_SHMEM)), 137 nid, K(node_page_state(nid, NR_SHMEM)),
137 nid, node_page_state(nid, NR_KERNEL_STACK) * 138 nid, node_page_state(nid, NR_KERNEL_STACK) *
138 THREAD_SIZE / 1024, 139 THREAD_SIZE / 1024,
@@ -143,13 +144,14 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
143 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + 144 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
144 node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), 145 node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
145 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), 146 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
146 nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
147#ifdef CONFIG_TRANSPARENT_HUGEPAGE 147#ifdef CONFIG_TRANSPARENT_HUGEPAGE
148 nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
148 , nid, 149 , nid,
149 K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * 150 K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
150 HPAGE_PMD_NR) 151 HPAGE_PMD_NR));
152#else
153 nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
151#endif 154#endif
152 );
153 n += hugetlb_report_node_meminfo(nid, buf + n); 155 n += hugetlb_report_node_meminfo(nid, buf + n);
154 return n; 156 return n;
155} 157}
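
Editor's note: the node_read_meminfo() hunks restructure a long formatted-output call so that each CONFIG_TRANSPARENT_HUGEPAGE branch carries a complete, properly terminated argument group instead of splitting one argument and the closing parenthesis across the preprocessor boundary. A toy, compilable version of the same pattern; WITH_EXTRA is an invented config symbol.

#include <stdio.h>

#define WITH_EXTRA 1	/* invented config symbol for the sketch */

/* Each preprocessor branch carries a complete format string and a
 * complete, self-terminated argument list, mirroring the fix. */
static int report(char *buf, size_t len, long base, long extra)
{
#ifdef WITH_EXTRA
	return snprintf(buf, len, "Pages: %ld\nExtra: %ld\n", base, extra);
#else
	return snprintf(buf, len, "Pages: %ld\n", base);
#endif
}

int main(void)
{
	char buf[64];

	report(buf, sizeof(buf), 128, 4);
	fputs(buf, stdout);
	return 0;
}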
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 5f0f85d5c576..428e55e012dc 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -229,7 +229,8 @@ int pm_clk_suspend(struct device *dev)
229 229
230 list_for_each_entry_reverse(ce, &psd->clock_list, node) { 230 list_for_each_entry_reverse(ce, &psd->clock_list, node) {
231 if (ce->status < PCE_STATUS_ERROR) { 231 if (ce->status < PCE_STATUS_ERROR) {
232 clk_disable(ce->clk); 232 if (ce->status == PCE_STATUS_ENABLED)
233 clk_disable(ce->clk);
233 ce->status = PCE_STATUS_ACQUIRED; 234 ce->status = PCE_STATUS_ACQUIRED;
234 } 235 }
235 } 236 }
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 7fa098464dae..c3d2dfcf438d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -920,7 +920,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
920 End: 920 End:
921 if (!error) { 921 if (!error) {
922 dev->power.is_suspended = true; 922 dev->power.is_suspended = true;
923 if (dev->power.wakeup_path && dev->parent) 923 if (dev->power.wakeup_path
924 && dev->parent && !dev->parent->power.ignore_children)
924 dev->parent->power.wakeup_path = true; 925 dev->parent->power.wakeup_path = true;
925 } 926 }
926 927
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 434a6c011675..95706fa24c73 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -669,7 +669,7 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
669 struct device_opp *dev_opp = find_device_opp(dev); 669 struct device_opp *dev_opp = find_device_opp(dev);
670 670
671 if (IS_ERR(dev_opp)) 671 if (IS_ERR(dev_opp))
672 return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */ 672 return ERR_CAST(dev_opp); /* matching type */
673 673
674 return &dev_opp->head; 674 return &dev_opp->head;
675} 675}
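
Editor's note: ERR_PTR(PTR_ERR(p)) round-trips an error pointer through a long just to change its pointee type; ERR_CAST() says the same thing directly. The sketch below uses simplified userspace stand-ins for the err.h helpers (they mimic the idea, not the kernel's exact definitions) to show the pattern the opp.c hunk adopts.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Simplified userspace stand-ins for the kernel's err.h helpers. */
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
/* ERR_CAST: reinterpret an error pointer as another pointer type. */
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

struct device_opp { int dummy; };
struct notifier_head { int dummy; };

static struct device_opp *find_opp(int ok)
{
	static struct device_opp opp;
	return ok ? &opp : ERR_PTR(-ENODEV);
}

static struct notifier_head *get_notifier(int ok)
{
	struct device_opp *opp = find_opp(ok);

	if (IS_ERR(opp))
		return ERR_CAST(opp);	/* same error, different pointer type */
	return (struct notifier_head *)opp;	/* placeholder for the real member */
}

int main(void)
{
	struct notifier_head *nh = get_notifier(0);

	printf("err = %ld\n", IS_ERR(nh) ? PTR_ERR(nh) : 0L);
	return 0;
}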
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 30a94eadc200..86de6c50fc41 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -212,11 +212,9 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
212 if (!dev || !req) /*guard against callers passing in null */ 212 if (!dev || !req) /*guard against callers passing in null */
213 return -EINVAL; 213 return -EINVAL;
214 214
215 if (dev_pm_qos_request_active(req)) { 215 if (WARN(dev_pm_qos_request_active(req),
216 WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already " 216 "%s() called for already added request\n", __func__))
217 "added request\n");
218 return -EINVAL; 217 return -EINVAL;
219 }
220 218
221 req->dev = dev; 219 req->dev = dev;
222 220
@@ -271,11 +269,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
271 if (!req) /*guard against callers passing in null */ 269 if (!req) /*guard against callers passing in null */
272 return -EINVAL; 270 return -EINVAL;
273 271
274 if (!dev_pm_qos_request_active(req)) { 272 if (WARN(!dev_pm_qos_request_active(req),
275 WARN(1, KERN_ERR "dev_pm_qos_update_request() called for " 273 "%s() called for unknown object\n", __func__))
276 "unknown object\n");
277 return -EINVAL; 274 return -EINVAL;
278 }
279 275
280 mutex_lock(&dev_pm_qos_mtx); 276 mutex_lock(&dev_pm_qos_mtx);
281 277
@@ -312,11 +308,9 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
312 if (!req) /*guard against callers passing in null */ 308 if (!req) /*guard against callers passing in null */
313 return -EINVAL; 309 return -EINVAL;
314 310
315 if (!dev_pm_qos_request_active(req)) { 311 if (WARN(!dev_pm_qos_request_active(req),
316 WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for " 312 "%s() called for unknown object\n", __func__))
317 "unknown object\n");
318 return -EINVAL; 313 return -EINVAL;
319 }
320 314
321 mutex_lock(&dev_pm_qos_mtx); 315 mutex_lock(&dev_pm_qos_mtx);
322 316
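
Editor's note: the three qos.c hunks rely on WARN(condition, ...) evaluating to its condition, so the separate if block and hand-rolled message collapse into a single if (WARN(...)) return -EINVAL;. The sketch below uses a simplified WARN-like macro (a GNU C statement expression, as the kernel itself uses) that keeps that return-the-condition property; it is an analogue, not the kernel macro.

#include <stdio.h>

/* Minimal stand-in: print a message when cond is true, and
 * evaluate to cond so it can be used directly in an if (). */
#define WARN_ON_MSG(cond, fmt, ...) ({					\
	int __c = !!(cond);						\
	if (__c)							\
		fprintf(stderr, "warning: " fmt "\n", ##__VA_ARGS__);	\
	__c;								\
})

static int add_request(int already_added)
{
	if (WARN_ON_MSG(already_added, "%s() called for already added request",
			__func__))
		return -1;
	return 0;
}

int main(void)
{
	printf("first add:  %d\n", add_request(0));
	printf("second add: %d\n", add_request(1));
	return 0;
}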
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 486f94ef24d4..587cce57adae 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -24,6 +24,7 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/pci-aspm.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
28#include <linux/slab.h> 29#include <linux/slab.h>
29#include <linux/delay.h> 30#include <linux/delay.h>
@@ -2600,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
2600 c->Request.Timeout = 0; 2601 c->Request.Timeout = 0;
2601 c->Request.CDB[0] = BMIC_WRITE; 2602 c->Request.CDB[0] = BMIC_WRITE;
2602 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 2603 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2604 c->Request.CDB[7] = (size >> 8) & 0xFF;
2605 c->Request.CDB[8] = size & 0xFF;
2603 break; 2606 break;
2604 case TEST_UNIT_READY: 2607 case TEST_UNIT_READY:
2605 c->Request.CDBLen = 6; 2608 c->Request.CDBLen = 6;
@@ -4319,6 +4322,10 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
4319 dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); 4322 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
4320 return -ENODEV; 4323 return -ENODEV;
4321 } 4324 }
4325
4326 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
4327 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
4328
4322 err = pci_enable_device(h->pdev); 4329 err = pci_enable_device(h->pdev);
4323 if (err) { 4330 if (err) {
4324 dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n"); 4331 dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
@@ -4875,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
4875{ 4882{
4876 if (h->msix_vector || h->msi_vector) { 4883 if (h->msix_vector || h->msi_vector) {
4877 if (!request_irq(h->intr[h->intr_mode], msixhandler, 4884 if (!request_irq(h->intr[h->intr_mode], msixhandler,
4878 IRQF_DISABLED, h->devname, h)) 4885 0, h->devname, h))
4879 return 0; 4886 return 0;
4880 dev_err(&h->pdev->dev, "Unable to get msi irq %d" 4887 dev_err(&h->pdev->dev, "Unable to get msi irq %d"
4881 " for %s\n", h->intr[h->intr_mode], 4888 " for %s\n", h->intr[h->intr_mode],
@@ -4884,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
4884 } 4891 }
4885 4892
4886 if (!request_irq(h->intr[h->intr_mode], intxhandler, 4893 if (!request_irq(h->intr[h->intr_mode], intxhandler,
4887 IRQF_DISABLED, h->devname, h)) 4894 IRQF_SHARED, h->devname, h))
4888 return 0; 4895 return 0;
4889 dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n", 4896 dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
4890 h->intr[h->intr_mode], h->devname); 4897 h->intr[h->intr_mode], h->devname);
@@ -5158,6 +5165,7 @@ reinit_after_soft_reset:
5158 h->cciss_max_sectors = 8192; 5165 h->cciss_max_sectors = 8192;
5159 5166
5160 rebuild_lun_table(h, 1, 0); 5167 rebuild_lun_table(h, 1, 0);
5168 cciss_engage_scsi(h);
5161 h->busy_initializing = 0; 5169 h->busy_initializing = 0;
5162 return 1; 5170 return 1;
5163 5171
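
Editor's note: among the cciss changes, the cache-flush command now stores the transfer size in CDB bytes 7 and 8, most significant byte first. A tiny standalone sketch of that big-endian 16-bit packing and the matching unpack; the cdb below is just an array, not the driver's command structure.

#include <stdio.h>

int main(void)
{
	unsigned char cdb[16] = { 0 };
	unsigned int size = 0x0204;	/* e.g. 516 bytes */

	/* Pack: MSB in byte 7, LSB in byte 8, as in the BMIC cache flush. */
	cdb[7] = (size >> 8) & 0xFF;
	cdb[8] = size & 0xFF;

	/* Unpack to verify the round trip. */
	unsigned int decoded = (cdb[7] << 8) | cdb[8];

	printf("packed %u -> bytes %02x %02x -> decoded %u\n",
	       size, cdb[7], cdb[8], decoded);
	return 0;
}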
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 951a4e33b92b..e820b68d2f6c 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -1720,5 +1720,6 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
1720/* If no tape support, then these become defined out of existence */ 1720/* If no tape support, then these become defined out of existence */
1721 1721
1722#define cciss_scsi_setup(cntl_num) 1722#define cciss_scsi_setup(cntl_num)
1723#define cciss_engage_scsi(h)
1723 1724
1724#endif /* CONFIG_CISS_SCSI_TAPE */ 1725#endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 3d806820280e..1e888c9e85b3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -161,17 +161,19 @@ static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
161 &xor_funcs 161 &xor_funcs
162}; 162};
163 163
164static loff_t get_loop_size(struct loop_device *lo, struct file *file) 164static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
165{ 165{
166 loff_t size, offset, loopsize; 166 loff_t size, loopsize;
167 167
168 /* Compute loopsize in bytes */ 168 /* Compute loopsize in bytes */
169 size = i_size_read(file->f_mapping->host); 169 size = i_size_read(file->f_mapping->host);
170 offset = lo->lo_offset;
171 loopsize = size - offset; 170 loopsize = size - offset;
172 if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize) 171 /* offset is beyond i_size, wierd but possible */
173 loopsize = lo->lo_sizelimit; 172 if (loopsize < 0)
173 return 0;
174 174
175 if (sizelimit > 0 && sizelimit < loopsize)
176 loopsize = sizelimit;
175 /* 177 /*
176 * Unfortunately, if we want to do I/O on the device, 178 * Unfortunately, if we want to do I/O on the device,
177 * the number of 512-byte sectors has to fit into a sector_t. 179 * the number of 512-byte sectors has to fit into a sector_t.
@@ -179,17 +181,25 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
179 return loopsize >> 9; 181 return loopsize >> 9;
180} 182}
181 183
184static loff_t get_loop_size(struct loop_device *lo, struct file *file)
185{
186 return get_size(lo->lo_offset, lo->lo_sizelimit, file);
187}
188
182static int 189static int
183figure_loop_size(struct loop_device *lo) 190figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
184{ 191{
185 loff_t size = get_loop_size(lo, lo->lo_backing_file); 192 loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
186 sector_t x = (sector_t)size; 193 sector_t x = (sector_t)size;
187 194
188 if (unlikely((loff_t)x != size)) 195 if (unlikely((loff_t)x != size))
189 return -EFBIG; 196 return -EFBIG;
190 197 if (lo->lo_offset != offset)
198 lo->lo_offset = offset;
199 if (lo->lo_sizelimit != sizelimit)
200 lo->lo_sizelimit = sizelimit;
191 set_capacity(lo->lo_disk, x); 201 set_capacity(lo->lo_disk, x);
192 return 0; 202 return 0;
193} 203}
194 204
195static inline int 205static inline int
@@ -372,7 +382,8 @@ do_lo_receive(struct loop_device *lo,
372 382
373 if (retval < 0) 383 if (retval < 0)
374 return retval; 384 return retval;
375 385 if (retval != bvec->bv_len)
386 return -EIO;
376 return 0; 387 return 0;
377} 388}
378 389
@@ -411,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
411 422
412 /* 423 /*
413 * We use punch hole to reclaim the free space used by the 424 * We use punch hole to reclaim the free space used by the
414 * image a.k.a. discard. However we do support discard if 425 * image a.k.a. discard. However we do not support discard if
415 * encryption is enabled, because it may give an attacker 426 * encryption is enabled, because it may give an attacker
416 * useful information. 427 * useful information.
417 */ 428 */
@@ -786,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
786 } 797 }
787 798
788 q->limits.discard_granularity = inode->i_sb->s_blocksize; 799 q->limits.discard_granularity = inode->i_sb->s_blocksize;
789 q->limits.discard_alignment = inode->i_sb->s_blocksize; 800 q->limits.discard_alignment = 0;
790 q->limits.max_discard_sectors = UINT_MAX >> 9; 801 q->limits.max_discard_sectors = UINT_MAX >> 9;
791 q->limits.discard_zeroes_data = 1; 802 q->limits.discard_zeroes_data = 1;
792 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); 803 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
@@ -1058,9 +1069,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1058 1069
1059 if (lo->lo_offset != info->lo_offset || 1070 if (lo->lo_offset != info->lo_offset ||
1060 lo->lo_sizelimit != info->lo_sizelimit) { 1071 lo->lo_sizelimit != info->lo_sizelimit) {
1061 lo->lo_offset = info->lo_offset; 1072 if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
1062 lo->lo_sizelimit = info->lo_sizelimit;
1063 if (figure_loop_size(lo))
1064 return -EFBIG; 1073 return -EFBIG;
1065 } 1074 }
1066 loop_config_discard(lo); 1075 loop_config_discard(lo);
@@ -1246,7 +1255,7 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
1246 err = -ENXIO; 1255 err = -ENXIO;
1247 if (unlikely(lo->lo_state != Lo_bound)) 1256 if (unlikely(lo->lo_state != Lo_bound))
1248 goto out; 1257 goto out;
1249 err = figure_loop_size(lo); 1258 err = figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
1250 if (unlikely(err)) 1259 if (unlikely(err))
1251 goto out; 1260 goto out;
1252 sec = get_capacity(lo->lo_disk); 1261 sec = get_capacity(lo->lo_disk);
@@ -1284,13 +1293,19 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1284 goto out_unlocked; 1293 goto out_unlocked;
1285 break; 1294 break;
1286 case LOOP_SET_STATUS: 1295 case LOOP_SET_STATUS:
1287 err = loop_set_status_old(lo, (struct loop_info __user *) arg); 1296 err = -EPERM;
1297 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1298 err = loop_set_status_old(lo,
1299 (struct loop_info __user *)arg);
1288 break; 1300 break;
1289 case LOOP_GET_STATUS: 1301 case LOOP_GET_STATUS:
1290 err = loop_get_status_old(lo, (struct loop_info __user *) arg); 1302 err = loop_get_status_old(lo, (struct loop_info __user *) arg);
1291 break; 1303 break;
1292 case LOOP_SET_STATUS64: 1304 case LOOP_SET_STATUS64:
1293 err = loop_set_status64(lo, (struct loop_info64 __user *) arg); 1305 err = -EPERM;
1306 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1307 err = loop_set_status64(lo,
1308 (struct loop_info64 __user *) arg);
1294 break; 1309 break;
1295 case LOOP_GET_STATUS64: 1310 case LOOP_GET_STATUS64:
1296 err = loop_get_status64(lo, (struct loop_info64 __user *) arg); 1311 err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
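
Editor's note: the loop driver refactor pulls the size computation into get_size(offset, sizelimit, file): backing-file size minus offset, clamped to zero when the offset lies past EOF, optionally capped by the size limit, then converted to 512-byte sectors. Below is a standalone version of just that arithmetic, with the file size passed in directly instead of read from an inode.

#include <stdio.h>

typedef long long loff_t_;

/* Mirror of the patched get_size() logic, minus the inode lookup. */
static loff_t_ loop_size_sectors(loff_t_ file_size, loff_t_ offset,
				 loff_t_ sizelimit)
{
	loff_t_ loopsize = file_size - offset;

	if (loopsize < 0)		/* offset beyond EOF: empty device */
		return 0;
	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;	/* cap at the configured limit */
	return loopsize >> 9;		/* bytes -> 512-byte sectors */
}

int main(void)
{
	printf("%lld\n", loop_size_sectors(1 << 20, 0, 0));	/* 2048 */
	printf("%lld\n", loop_size_sectors(1 << 20, 4096, 0));	/* 2040 */
	printf("%lld\n", loop_size_sectors(1 << 20, 0, 4096));	/* 8    */
	printf("%lld\n", loop_size_sectors(4096, 8192, 0));	/* 0    */
	return 0;
}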
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 6b9a2000d56a..a79fb4f7ff62 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -630,6 +630,7 @@ static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t
630 if (dev->status & 0x10) 630 if (dev->status & 0x10)
631 return -ETIME; 631 return -ETIME;
632 632
633 memset(&hdr, 0, sizeof(hdr));
633 hdr.magic = PG_MAGIC; 634 hdr.magic = PG_MAGIC;
634 hdr.dlen = dev->dlen; 635 hdr.dlen = dev->dlen;
635 copy = 0; 636 copy = 0;
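
Editor's note: the one-line pg fix zeroes the reply header before filling it, so padding bytes never reach userspace with stale stack contents. The same habit in a standalone sketch; the struct layout is invented, and write() stands in for the driver's copy_to_user().

#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct reply_hdr {
	int  magic;
	char scsi_status;	/* the compiler may insert padding after this */
	int  dlen;
};

int main(void)
{
	struct reply_hdr hdr;

	/* Zero the whole struct first: any padding bytes are now defined
	 * instead of leaking whatever was on the stack. */
	memset(&hdr, 0, sizeof(hdr));
	hdr.magic = 0x1234;	/* arbitrary value for the sketch */
	hdr.dlen = 64;

	/* Stand-in for copy_to_user(): push the raw bytes somewhere. */
	return write(STDOUT_FILENO, &hdr, sizeof(hdr)) == (ssize_t)sizeof(hdr) ? 0 : 1;
}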
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 65cc424359b0..148ab944378d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list); /* clients */
183 183
184static int __rbd_init_snaps_header(struct rbd_device *rbd_dev); 184static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
185static void rbd_dev_release(struct device *dev); 185static void rbd_dev_release(struct device *dev);
186static ssize_t rbd_snap_rollback(struct device *dev,
187 struct device_attribute *attr,
188 const char *buf,
189 size_t size);
190static ssize_t rbd_snap_add(struct device *dev, 186static ssize_t rbd_snap_add(struct device *dev,
191 struct device_attribute *attr, 187 struct device_attribute *attr,
192 const char *buf, 188 const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
461 u32 snap_count = le32_to_cpu(ondisk->snap_count); 457 u32 snap_count = le32_to_cpu(ondisk->snap_count);
462 int ret = -ENOMEM; 458 int ret = -ENOMEM;
463 459
460 if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
461 return -ENXIO;
462 }
463
464 init_rwsem(&header->snap_rwsem); 464 init_rwsem(&header->snap_rwsem);
465 header->snap_names_len = le64_to_cpu(ondisk->snap_names_len); 465 header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
466 header->snapc = kmalloc(sizeof(struct ceph_snap_context) + 466 header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1356,32 +1356,6 @@ fail:
1356} 1356}
1357 1357
1358/* 1358/*
1359 * Request sync osd rollback
1360 */
1361static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
1362 u64 snapid,
1363 const char *obj)
1364{
1365 struct ceph_osd_req_op *ops;
1366 int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
1367 if (ret < 0)
1368 return ret;
1369
1370 ops[0].snap.snapid = snapid;
1371
1372 ret = rbd_req_sync_op(dev, NULL,
1373 CEPH_NOSNAP,
1374 0,
1375 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1376 ops,
1377 1, obj, 0, 0, NULL, NULL, NULL);
1378
1379 rbd_destroy_ops(ops);
1380
1381 return ret;
1382}
1383
1384/*
1385 * Request sync osd read 1359 * Request sync osd read
1386 */ 1360 */
1387static int rbd_req_sync_exec(struct rbd_device *dev, 1361static int rbd_req_sync_exec(struct rbd_device *dev,
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
1610 goto out_dh; 1584 goto out_dh;
1611 1585
1612 rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL); 1586 rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
1613 if (rc < 0) 1587 if (rc < 0) {
1588 if (rc == -ENXIO) {
1589 pr_warning("unrecognized header format"
1590 " for image %s", rbd_dev->obj);
1591 }
1614 goto out_dh; 1592 goto out_dh;
1593 }
1615 1594
1616 if (snap_count != header->total_snaps) { 1595 if (snap_count != header->total_snaps) {
1617 snap_count = header->total_snaps; 1596 snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
1882static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh); 1861static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
1883static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL); 1862static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
1884static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add); 1863static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
1885static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
1886 1864
1887static struct attribute *rbd_attrs[] = { 1865static struct attribute *rbd_attrs[] = {
1888 &dev_attr_size.attr, 1866 &dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
1893 &dev_attr_current_snap.attr, 1871 &dev_attr_current_snap.attr,
1894 &dev_attr_refresh.attr, 1872 &dev_attr_refresh.attr,
1895 &dev_attr_create_snap.attr, 1873 &dev_attr_create_snap.attr,
1896 &dev_attr_rollback_snap.attr,
1897 NULL 1874 NULL
1898}; 1875};
1899 1876
@@ -2424,64 +2401,6 @@ err_unlock:
2424 return ret; 2401 return ret;
2425} 2402}
2426 2403
2427static ssize_t rbd_snap_rollback(struct device *dev,
2428 struct device_attribute *attr,
2429 const char *buf,
2430 size_t count)
2431{
2432 struct rbd_device *rbd_dev = dev_to_rbd(dev);
2433 int ret;
2434 u64 snapid;
2435 u64 cur_ofs;
2436 char *seg_name = NULL;
2437 char *snap_name = kmalloc(count + 1, GFP_KERNEL);
2438 ret = -ENOMEM;
2439 if (!snap_name)
2440 return ret;
2441
2442 /* parse snaps add command */
2443 snprintf(snap_name, count, "%s", buf);
2444 seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
2445 if (!seg_name)
2446 goto done;
2447
2448 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2449
2450 ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
2451 if (ret < 0)
2452 goto done_unlock;
2453
2454 dout("snapid=%lld\n", snapid);
2455
2456 cur_ofs = 0;
2457 while (cur_ofs < rbd_dev->header.image_size) {
2458 cur_ofs += rbd_get_segment(&rbd_dev->header,
2459 rbd_dev->obj,
2460 cur_ofs, (u64)-1,
2461 seg_name, NULL);
2462 dout("seg_name=%s\n", seg_name);
2463
2464 ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
2465 if (ret < 0)
2466 pr_warning("could not roll back obj %s err=%d\n",
2467 seg_name, ret);
2468 }
2469
2470 ret = __rbd_update_snaps(rbd_dev);
2471 if (ret < 0)
2472 goto done_unlock;
2473
2474 ret = count;
2475
2476done_unlock:
2477 mutex_unlock(&ctl_mutex);
2478done:
2479 kfree(seg_name);
2480 kfree(snap_name);
2481
2482 return ret;
2483}
2484
2485static struct bus_attribute rbd_bus_attrs[] = { 2404static struct bus_attribute rbd_bus_attrs[] = {
2486 __ATTR(add, S_IWUSR, NULL, rbd_add), 2405 __ATTR(add, S_IWUSR, NULL, rbd_add),
2487 __ATTR(remove, S_IWUSR, NULL, rbd_remove), 2406 __ATTR(remove, S_IWUSR, NULL, rbd_remove),
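
Editor's note: besides dropping the rollback attribute, rbd now rejects on-disk headers whose leading text does not match the expected magic, returning -ENXIO, and the read path warns with the image name. A standalone sketch of that prefix check; the header layout and magic string below are illustrative, not the rbd on-disk format.

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define HDR_TEXT "<<< Example Image Header >>>\n"

struct ondisk_header {
	char text[64];
	unsigned int snap_count;
};

static int header_from_disk(const struct ondisk_header *ondisk)
{
	/* Reject anything that doesn't start with the expected magic text. */
	if (memcmp(ondisk->text, HDR_TEXT, sizeof(HDR_TEXT)))
		return -ENXIO;
	return 0;
}

int main(void)
{
	struct ondisk_header good = { HDR_TEXT, 3 };
	struct ondisk_header bad  = { "garbage", 0 };

	printf("good: %d\n", header_from_disk(&good));
	if (header_from_disk(&bad) == -ENXIO)
		fprintf(stderr, "unrecognized header format for image %s\n", "img0");
	return 0;
}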
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ae3e167e17ad..89ddab127e33 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -16,6 +16,8 @@
16 * handle GCR disks 16 * handle GCR disks
17 */ 17 */
18 18
19#undef DEBUG
20
19#include <linux/stddef.h> 21#include <linux/stddef.h>
20#include <linux/kernel.h> 22#include <linux/kernel.h>
21#include <linux/sched.h> 23#include <linux/sched.h>
@@ -36,13 +38,11 @@
36#include <asm/machdep.h> 38#include <asm/machdep.h>
37#include <asm/pmac_feature.h> 39#include <asm/pmac_feature.h>
38 40
39static DEFINE_MUTEX(swim3_mutex);
40static struct request_queue *swim3_queue;
41static struct gendisk *disks[2];
42static struct request *fd_req;
43
44#define MAX_FLOPPIES 2 41#define MAX_FLOPPIES 2
45 42
43static DEFINE_MUTEX(swim3_mutex);
44static struct gendisk *disks[MAX_FLOPPIES];
45
46enum swim_state { 46enum swim_state {
47 idle, 47 idle,
48 locating, 48 locating,
@@ -177,7 +177,6 @@ struct swim3 {
177 177
178struct floppy_state { 178struct floppy_state {
179 enum swim_state state; 179 enum swim_state state;
180 spinlock_t lock;
181 struct swim3 __iomem *swim3; /* hardware registers */ 180 struct swim3 __iomem *swim3; /* hardware registers */
182 struct dbdma_regs __iomem *dma; /* DMA controller registers */ 181 struct dbdma_regs __iomem *dma; /* DMA controller registers */
183 int swim3_intr; /* interrupt number for SWIM3 */ 182 int swim3_intr; /* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
204 int wanted; 203 int wanted;
205 struct macio_dev *mdev; 204 struct macio_dev *mdev;
206 char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)]; 205 char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
206 int index;
207 struct request *cur_req;
207}; 208};
208 209
210#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
211#define swim3_warn(fmt, arg...) dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
212#define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
213
214#ifdef DEBUG
215#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
216#else
217#define swim3_dbg(fmt, arg...) do { } while(0)
218#endif
219
209static struct floppy_state floppy_states[MAX_FLOPPIES]; 220static struct floppy_state floppy_states[MAX_FLOPPIES];
210static int floppy_count = 0; 221static int floppy_count = 0;
211static DEFINE_SPINLOCK(swim3_lock); 222static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
224 0, 0, 0, 0, 0, 0 235 0, 0, 0, 0, 0, 0
225}; 236};
226 237
227static void swim3_select(struct floppy_state *fs, int sel);
228static void swim3_action(struct floppy_state *fs, int action);
229static int swim3_readbit(struct floppy_state *fs, int bit);
230static void do_fd_request(struct request_queue * q);
231static void start_request(struct floppy_state *fs);
232static void set_timeout(struct floppy_state *fs, int nticks,
233 void (*proc)(unsigned long));
234static void scan_track(struct floppy_state *fs);
235static void seek_track(struct floppy_state *fs, int n); 238static void seek_track(struct floppy_state *fs, int n);
236static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count); 239static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
237static void setup_transfer(struct floppy_state *fs);
238static void act(struct floppy_state *fs); 240static void act(struct floppy_state *fs);
239static void scan_timeout(unsigned long data); 241static void scan_timeout(unsigned long data);
240static void seek_timeout(unsigned long data); 242static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
254 unsigned int clearing); 256 unsigned int clearing);
255static int floppy_revalidate(struct gendisk *disk); 257static int floppy_revalidate(struct gendisk *disk);
256 258
257static bool swim3_end_request(int err, unsigned int nr_bytes) 259static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
258{ 260{
259 if (__blk_end_request(fd_req, err, nr_bytes)) 261 struct request *req = fs->cur_req;
260 return true; 262 int rc;
261 263
262 fd_req = NULL; 264 swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
263 return false; 265 err, nr_bytes, req);
264}
265 266
266static bool swim3_end_request_cur(int err) 267 if (err)
267{ 268 nr_bytes = blk_rq_cur_bytes(req);
268 return swim3_end_request(err, blk_rq_cur_bytes(fd_req)); 269 rc = __blk_end_request(req, err, nr_bytes);
270 if (rc)
271 return true;
272 fs->cur_req = NULL;
273 return false;
269} 274}
270 275
271static void swim3_select(struct floppy_state *fs, int sel) 276static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
303 return (stat & DATA) == 0; 308 return (stat & DATA) == 0;
304} 309}
305 310
306static void do_fd_request(struct request_queue * q)
307{
308 int i;
309
310 for(i=0; i<floppy_count; i++) {
311 struct floppy_state *fs = &floppy_states[i];
312 if (fs->mdev->media_bay &&
313 check_media_bay(fs->mdev->media_bay) != MB_FD)
314 continue;
315 start_request(fs);
316 }
317}
318
319static void start_request(struct floppy_state *fs) 311static void start_request(struct floppy_state *fs)
320{ 312{
321 struct request *req; 313 struct request *req;
322 unsigned long x; 314 unsigned long x;
323 315
316 swim3_dbg("start request, initial state=%d\n", fs->state);
317
324 if (fs->state == idle && fs->wanted) { 318 if (fs->state == idle && fs->wanted) {
325 fs->state = available; 319 fs->state = available;
326 wake_up(&fs->wait); 320 wake_up(&fs->wait);
327 return; 321 return;
328 } 322 }
329 while (fs->state == idle) { 323 while (fs->state == idle) {
330 if (!fd_req) { 324 swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
331 fd_req = blk_fetch_request(swim3_queue); 325 if (!fs->cur_req) {
332 if (!fd_req) 326 fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
327 swim3_dbg(" fetched request %p\n", fs->cur_req);
328 if (!fs->cur_req)
333 break; 329 break;
334 } 330 }
335 req = fd_req; 331 req = fs->cur_req;
336#if 0 332
337 printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n", 333 if (fs->mdev->media_bay &&
338 req->rq_disk->disk_name, req->cmd, 334 check_media_bay(fs->mdev->media_bay) != MB_FD) {
339 (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer); 335 swim3_dbg("%s", " media bay absent, dropping req\n");
340 printk(" errors=%d current_nr_sectors=%u\n", 336 swim3_end_request(fs, -ENODEV, 0);
341 req->errors, blk_rq_cur_sectors(req)); 337 continue;
338 }
339
340#if 0 /* This is really too verbose */
341 swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
342 req->rq_disk->disk_name, req->cmd,
343 (long)blk_rq_pos(req), blk_rq_sectors(req),
344 req->buffer);
345 swim3_dbg(" errors=%d current_nr_sectors=%u\n",
346 req->errors, blk_rq_cur_sectors(req));
342#endif 347#endif
343 348
344 if (blk_rq_pos(req) >= fs->total_secs) { 349 if (blk_rq_pos(req) >= fs->total_secs) {
345 swim3_end_request_cur(-EIO); 350 swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
351 (long)blk_rq_pos(req), (long)fs->total_secs);
352 swim3_end_request(fs, -EIO, 0);
346 continue; 353 continue;
347 } 354 }
348 if (fs->ejected) { 355 if (fs->ejected) {
349 swim3_end_request_cur(-EIO); 356 swim3_dbg("%s", " disk ejected\n");
357 swim3_end_request(fs, -EIO, 0);
350 continue; 358 continue;
351 } 359 }
352 360
@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
354 if (fs->write_prot < 0) 362 if (fs->write_prot < 0)
355 fs->write_prot = swim3_readbit(fs, WRITE_PROT); 363 fs->write_prot = swim3_readbit(fs, WRITE_PROT);
356 if (fs->write_prot) { 364 if (fs->write_prot) {
357 swim3_end_request_cur(-EIO); 365 swim3_dbg("%s", " try to write, disk write protected\n");
366 swim3_end_request(fs, -EIO, 0);
358 continue; 367 continue;
359 } 368 }
360 } 369 }
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
369 x = ((long)blk_rq_pos(req)) % fs->secpercyl; 378 x = ((long)blk_rq_pos(req)) % fs->secpercyl;
370 fs->head = x / fs->secpertrack; 379 fs->head = x / fs->secpertrack;
371 fs->req_sector = x % fs->secpertrack + 1; 380 fs->req_sector = x % fs->secpertrack + 1;
372 fd_req = req;
373 fs->state = do_transfer; 381 fs->state = do_transfer;
374 fs->retries = 0; 382 fs->retries = 0;
375 383
@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
377 } 385 }
378} 386}
379 387
388static void do_fd_request(struct request_queue * q)
389{
390 start_request(q->queuedata);
391}
392
380static void set_timeout(struct floppy_state *fs, int nticks, 393static void set_timeout(struct floppy_state *fs, int nticks,
381 void (*proc)(unsigned long)) 394 void (*proc)(unsigned long))
382{ 395{
383 unsigned long flags;
384
385 spin_lock_irqsave(&fs->lock, flags);
386 if (fs->timeout_pending) 396 if (fs->timeout_pending)
387 del_timer(&fs->timeout); 397 del_timer(&fs->timeout);
388 fs->timeout.expires = jiffies + nticks; 398 fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
390 fs->timeout.data = (unsigned long) fs; 400 fs->timeout.data = (unsigned long) fs;
391 add_timer(&fs->timeout); 401 add_timer(&fs->timeout);
392 fs->timeout_pending = 1; 402 fs->timeout_pending = 1;
393 spin_unlock_irqrestore(&fs->lock, flags);
394} 403}
395 404
396static inline void scan_track(struct floppy_state *fs) 405static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
442 struct swim3 __iomem *sw = fs->swim3; 451 struct swim3 __iomem *sw = fs->swim3;
443 struct dbdma_cmd *cp = fs->dma_cmd; 452 struct dbdma_cmd *cp = fs->dma_cmd;
444 struct dbdma_regs __iomem *dr = fs->dma; 453 struct dbdma_regs __iomem *dr = fs->dma;
454 struct request *req = fs->cur_req;
445 455
446 if (blk_rq_cur_sectors(fd_req) <= 0) { 456 if (blk_rq_cur_sectors(req) <= 0) {
447 printk(KERN_ERR "swim3: transfer 0 sectors?\n"); 457 swim3_warn("%s", "Transfer 0 sectors ?\n");
448 return; 458 return;
449 } 459 }
450 if (rq_data_dir(fd_req) == WRITE) 460 if (rq_data_dir(req) == WRITE)
451 n = 1; 461 n = 1;
452 else { 462 else {
453 n = fs->secpertrack - fs->req_sector + 1; 463 n = fs->secpertrack - fs->req_sector + 1;
454 if (n > blk_rq_cur_sectors(fd_req)) 464 if (n > blk_rq_cur_sectors(req))
455 n = blk_rq_cur_sectors(fd_req); 465 n = blk_rq_cur_sectors(req);
456 } 466 }
467
468 swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
469 fs->req_sector, fs->secpertrack, fs->head, n);
470
457 fs->scount = n; 471 fs->scount = n;
458 swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0); 472 swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
459 out_8(&sw->sector, fs->req_sector); 473 out_8(&sw->sector, fs->req_sector);
460 out_8(&sw->nsect, n); 474 out_8(&sw->nsect, n);
461 out_8(&sw->gap3, 0); 475 out_8(&sw->gap3, 0);
462 out_le32(&dr->cmdptr, virt_to_bus(cp)); 476 out_le32(&dr->cmdptr, virt_to_bus(cp));
463 if (rq_data_dir(fd_req) == WRITE) { 477 if (rq_data_dir(req) == WRITE) {
464 /* Set up 3 dma commands: write preamble, data, postamble */ 478 /* Set up 3 dma commands: write preamble, data, postamble */
465 init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble)); 479 init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
466 ++cp; 480 ++cp;
467 init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512); 481 init_dma(cp, OUTPUT_MORE, req->buffer, 512);
468 ++cp; 482 ++cp;
469 init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble)); 483 init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
470 } else { 484 } else {
471 init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512); 485 init_dma(cp, INPUT_LAST, req->buffer, n * 512);
472 } 486 }
473 ++cp; 487 ++cp;
474 out_le16(&cp->command, DBDMA_STOP); 488 out_le16(&cp->command, DBDMA_STOP);
475 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); 489 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
476 in_8(&sw->error); 490 in_8(&sw->error);
477 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); 491 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
478 if (rq_data_dir(fd_req) == WRITE) 492 if (rq_data_dir(req) == WRITE)
479 out_8(&sw->control_bis, WRITE_SECTORS); 493 out_8(&sw->control_bis, WRITE_SECTORS);
480 in_8(&sw->intr); 494 in_8(&sw->intr);
481 out_le32(&dr->control, (RUN << 16) | RUN); 495 out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
488static void act(struct floppy_state *fs) 502static void act(struct floppy_state *fs)
489{ 503{
490 for (;;) { 504 for (;;) {
505 swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
506 fs->state, fs->req_cyl, fs->cur_cyl);
507
491 switch (fs->state) { 508 switch (fs->state) {
492 case idle: 509 case idle:
493 return; /* XXX shouldn't get here */ 510 return; /* XXX shouldn't get here */
494 511
495 case locating: 512 case locating:
496 if (swim3_readbit(fs, TRACK_ZERO)) { 513 if (swim3_readbit(fs, TRACK_ZERO)) {
514 swim3_dbg("%s", " locate track 0\n");
497 fs->cur_cyl = 0; 515 fs->cur_cyl = 0;
498 if (fs->req_cyl == 0) 516 if (fs->req_cyl == 0)
499 fs->state = do_transfer; 517 fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
511 break; 529 break;
512 } 530 }
513 if (fs->req_cyl == fs->cur_cyl) { 531 if (fs->req_cyl == fs->cur_cyl) {
514 printk("whoops, seeking 0\n"); 532 swim3_warn("%s", "Whoops, seeking 0\n");
515 fs->state = do_transfer; 533 fs->state = do_transfer;
516 break; 534 break;
517 } 535 }
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
527 case do_transfer: 545 case do_transfer:
528 if (fs->cur_cyl != fs->req_cyl) { 546 if (fs->cur_cyl != fs->req_cyl) {
529 if (fs->retries > 5) { 547 if (fs->retries > 5) {
530 swim3_end_request_cur(-EIO); 548 swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
549 fs->req_cyl, fs->cur_cyl);
550 swim3_end_request(fs, -EIO, 0);
531 fs->state = idle; 551 fs->state = idle;
532 return; 552 return;
533 } 553 }
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
542 return; 562 return;
543 563
544 default: 564 default:
545 printk(KERN_ERR"swim3: unknown state %d\n", fs->state); 565 swim3_err("Unknown state %d\n", fs->state);
546 return; 566 return;
547 } 567 }
548 } 568 }
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
552{ 572{
553 struct floppy_state *fs = (struct floppy_state *) data; 573 struct floppy_state *fs = (struct floppy_state *) data;
554 struct swim3 __iomem *sw = fs->swim3; 574 struct swim3 __iomem *sw = fs->swim3;
575 unsigned long flags;
576
577 swim3_dbg("* scan timeout, state=%d\n", fs->state);
555 578
579 spin_lock_irqsave(&swim3_lock, flags);
556 fs->timeout_pending = 0; 580 fs->timeout_pending = 0;
557 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); 581 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
558 out_8(&sw->select, RELAX); 582 out_8(&sw->select, RELAX);
559 out_8(&sw->intr_enable, 0); 583 out_8(&sw->intr_enable, 0);
560 fs->cur_cyl = -1; 584 fs->cur_cyl = -1;
561 if (fs->retries > 5) { 585 if (fs->retries > 5) {
562 swim3_end_request_cur(-EIO); 586 swim3_end_request(fs, -EIO, 0);
563 fs->state = idle; 587 fs->state = idle;
564 start_request(fs); 588 start_request(fs);
565 } else { 589 } else {
566 fs->state = jogging; 590 fs->state = jogging;
567 act(fs); 591 act(fs);
568 } 592 }
593 spin_unlock_irqrestore(&swim3_lock, flags);
569} 594}
570 595
571static void seek_timeout(unsigned long data) 596static void seek_timeout(unsigned long data)
572{ 597{
573 struct floppy_state *fs = (struct floppy_state *) data; 598 struct floppy_state *fs = (struct floppy_state *) data;
574 struct swim3 __iomem *sw = fs->swim3; 599 struct swim3 __iomem *sw = fs->swim3;
600 unsigned long flags;
601
602 swim3_dbg("* seek timeout, state=%d\n", fs->state);
575 603
604 spin_lock_irqsave(&swim3_lock, flags);
576 fs->timeout_pending = 0; 605 fs->timeout_pending = 0;
577 out_8(&sw->control_bic, DO_SEEK); 606 out_8(&sw->control_bic, DO_SEEK);
578 out_8(&sw->select, RELAX); 607 out_8(&sw->select, RELAX);
579 out_8(&sw->intr_enable, 0); 608 out_8(&sw->intr_enable, 0);
580 printk(KERN_ERR "swim3: seek timeout\n"); 609 swim3_err("%s", "Seek timeout\n");
581 swim3_end_request_cur(-EIO); 610 swim3_end_request(fs, -EIO, 0);
582 fs->state = idle; 611 fs->state = idle;
583 start_request(fs); 612 start_request(fs);
613 spin_unlock_irqrestore(&swim3_lock, flags);
584} 614}
585 615
586static void settle_timeout(unsigned long data) 616static void settle_timeout(unsigned long data)
587{ 617{
588 struct floppy_state *fs = (struct floppy_state *) data; 618 struct floppy_state *fs = (struct floppy_state *) data;
589 struct swim3 __iomem *sw = fs->swim3; 619 struct swim3 __iomem *sw = fs->swim3;
620 unsigned long flags;
621
622 swim3_dbg("* settle timeout, state=%d\n", fs->state);
590 623
624 spin_lock_irqsave(&swim3_lock, flags);
591 fs->timeout_pending = 0; 625 fs->timeout_pending = 0;
592 if (swim3_readbit(fs, SEEK_COMPLETE)) { 626 if (swim3_readbit(fs, SEEK_COMPLETE)) {
593 out_8(&sw->select, RELAX); 627 out_8(&sw->select, RELAX);
594 fs->state = locating; 628 fs->state = locating;
595 act(fs); 629 act(fs);
596 return; 630 goto unlock;
597 } 631 }
598 out_8(&sw->select, RELAX); 632 out_8(&sw->select, RELAX);
599 if (fs->settle_time < 2*HZ) { 633 if (fs->settle_time < 2*HZ) {
600 ++fs->settle_time; 634 ++fs->settle_time;
601 set_timeout(fs, 1, settle_timeout); 635 set_timeout(fs, 1, settle_timeout);
602 return; 636 goto unlock;
603 } 637 }
604 printk(KERN_ERR "swim3: seek settle timeout\n"); 638 swim3_err("%s", "Seek settle timeout\n");
605 swim3_end_request_cur(-EIO); 639 swim3_end_request(fs, -EIO, 0);
606 fs->state = idle; 640 fs->state = idle;
607 start_request(fs); 641 start_request(fs);
642 unlock:
643 spin_unlock_irqrestore(&swim3_lock, flags);
608} 644}
609 645
610static void xfer_timeout(unsigned long data) 646static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
612 struct floppy_state *fs = (struct floppy_state *) data; 648 struct floppy_state *fs = (struct floppy_state *) data;
613 struct swim3 __iomem *sw = fs->swim3; 649 struct swim3 __iomem *sw = fs->swim3;
614 struct dbdma_regs __iomem *dr = fs->dma; 650 struct dbdma_regs __iomem *dr = fs->dma;
651 unsigned long flags;
615 int n; 652 int n;
616 653
654 swim3_dbg("* xfer timeout, state=%d\n", fs->state);
655
656 spin_lock_irqsave(&swim3_lock, flags);
617 fs->timeout_pending = 0; 657 fs->timeout_pending = 0;
618 out_le32(&dr->control, RUN << 16); 658 out_le32(&dr->control, RUN << 16);
619 /* We must wait a bit for dbdma to stop */ 659 /* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
622 out_8(&sw->intr_enable, 0); 662 out_8(&sw->intr_enable, 0);
623 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); 663 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
624 out_8(&sw->select, RELAX); 664 out_8(&sw->select, RELAX);
625 printk(KERN_ERR "swim3: timeout %sing sector %ld\n", 665 swim3_err("Timeout %sing sector %ld\n",
626 (rq_data_dir(fd_req)==WRITE? "writ": "read"), 666 (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
627 (long)blk_rq_pos(fd_req)); 667 (long)blk_rq_pos(fs->cur_req));
628 swim3_end_request_cur(-EIO); 668 swim3_end_request(fs, -EIO, 0);
629 fs->state = idle; 669 fs->state = idle;
630 start_request(fs); 670 start_request(fs);
671 spin_unlock_irqrestore(&swim3_lock, flags);
631} 672}
632 673
633static irqreturn_t swim3_interrupt(int irq, void *dev_id) 674static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
638 int stat, resid; 679 int stat, resid;
639 struct dbdma_regs __iomem *dr; 680 struct dbdma_regs __iomem *dr;
640 struct dbdma_cmd *cp; 681 struct dbdma_cmd *cp;
682 unsigned long flags;
683 struct request *req = fs->cur_req;
684
685 swim3_dbg("* interrupt, state=%d\n", fs->state);
641 686
687 spin_lock_irqsave(&swim3_lock, flags);
642 intr = in_8(&sw->intr); 688 intr = in_8(&sw->intr);
643 err = (intr & ERROR_INTR)? in_8(&sw->error): 0; 689 err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
644 if ((intr & ERROR_INTR) && fs->state != do_transfer) 690 if ((intr & ERROR_INTR) && fs->state != do_transfer)
645 printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n", 691 swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
646 fs->state, rq_data_dir(fd_req), intr, err); 692 fs->state, rq_data_dir(req), intr, err);
647 switch (fs->state) { 693 switch (fs->state) {
648 case locating: 694 case locating:
649 if (intr & SEEN_SECTOR) { 695 if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
653 del_timer(&fs->timeout); 699 del_timer(&fs->timeout);
654 fs->timeout_pending = 0; 700 fs->timeout_pending = 0;
655 if (sw->ctrack == 0xff) { 701 if (sw->ctrack == 0xff) {
656 printk(KERN_ERR "swim3: seen sector but cyl=ff?\n"); 702 swim3_err("%s", "Seen sector but cyl=ff?\n");
657 fs->cur_cyl = -1; 703 fs->cur_cyl = -1;
658 if (fs->retries > 5) { 704 if (fs->retries > 5) {
659 swim3_end_request_cur(-EIO); 705 swim3_end_request(fs, -EIO, 0);
660 fs->state = idle; 706 fs->state = idle;
661 start_request(fs); 707 start_request(fs);
662 } else { 708 } else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
668 fs->cur_cyl = sw->ctrack; 714 fs->cur_cyl = sw->ctrack;
669 fs->cur_sector = sw->csect; 715 fs->cur_sector = sw->csect;
670 if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl) 716 if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
671 printk(KERN_ERR "swim3: expected cyl %d, got %d\n", 717 swim3_err("Expected cyl %d, got %d\n",
672 fs->expect_cyl, fs->cur_cyl); 718 fs->expect_cyl, fs->cur_cyl);
673 fs->state = do_transfer; 719 fs->state = do_transfer;
674 act(fs); 720 act(fs);
675 } 721 }
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
704 fs->timeout_pending = 0; 750 fs->timeout_pending = 0;
705 dr = fs->dma; 751 dr = fs->dma;
706 cp = fs->dma_cmd; 752 cp = fs->dma_cmd;
707 if (rq_data_dir(fd_req) == WRITE) 753 if (rq_data_dir(req) == WRITE)
708 ++cp; 754 ++cp;
709 /* 755 /*
710 * Check that the main data transfer has finished. 756 * Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
729 if (intr & ERROR_INTR) { 775 if (intr & ERROR_INTR) {
730 n = fs->scount - 1 - resid / 512; 776 n = fs->scount - 1 - resid / 512;
731 if (n > 0) { 777 if (n > 0) {
732 blk_update_request(fd_req, 0, n << 9); 778 blk_update_request(req, 0, n << 9);
733 fs->req_sector += n; 779 fs->req_sector += n;
734 } 780 }
735 if (fs->retries < 5) { 781 if (fs->retries < 5) {
736 ++fs->retries; 782 ++fs->retries;
737 act(fs); 783 act(fs);
738 } else { 784 } else {
739 printk("swim3: error %sing block %ld (err=%x)\n", 785 swim3_err("Error %sing block %ld (err=%x)\n",
740 rq_data_dir(fd_req) == WRITE? "writ": "read", 786 rq_data_dir(req) == WRITE? "writ": "read",
741 (long)blk_rq_pos(fd_req), err); 787 (long)blk_rq_pos(req), err);
742 swim3_end_request_cur(-EIO); 788 swim3_end_request(fs, -EIO, 0);
743 fs->state = idle; 789 fs->state = idle;
744 } 790 }
745 } else { 791 } else {
746 if ((stat & ACTIVE) == 0 || resid != 0) { 792 if ((stat & ACTIVE) == 0 || resid != 0) {
747 /* musta been an error */ 793 /* musta been an error */
748 printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid); 794 swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
749 printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n", 795 swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
750 fs->state, rq_data_dir(fd_req), intr, err); 796 fs->state, rq_data_dir(req), intr, err);
751 swim3_end_request_cur(-EIO); 797 swim3_end_request(fs, -EIO, 0);
752 fs->state = idle; 798 fs->state = idle;
753 start_request(fs); 799 start_request(fs);
754 break; 800 break;
755 } 801 }
756 if (swim3_end_request(0, fs->scount << 9)) { 802 fs->retries = 0;
803 if (swim3_end_request(fs, 0, fs->scount << 9)) {
757 fs->req_sector += fs->scount; 804 fs->req_sector += fs->scount;
758 if (fs->req_sector > fs->secpertrack) { 805 if (fs->req_sector > fs->secpertrack) {
759 fs->req_sector -= fs->secpertrack; 806 fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
770 start_request(fs); 817 start_request(fs);
771 break; 818 break;
772 default: 819 default:
773 printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state); 820 swim3_err("Don't know what to do in state %d\n", fs->state);
774 } 821 }
822 spin_unlock_irqrestore(&swim3_lock, flags);
775 return IRQ_HANDLED; 823 return IRQ_HANDLED;
776} 824}
777 825
@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
781} 829}
782*/ 830*/
783 831
832/* Called under the mutex to grab exclusive access to a drive */
784static int grab_drive(struct floppy_state *fs, enum swim_state state, 833static int grab_drive(struct floppy_state *fs, enum swim_state state,
785 int interruptible) 834 int interruptible)
786{ 835{
787 unsigned long flags; 836 unsigned long flags;
788 837
789 spin_lock_irqsave(&fs->lock, flags); 838 swim3_dbg("%s", "-> grab drive\n");
790 if (fs->state != idle) { 839
840 spin_lock_irqsave(&swim3_lock, flags);
841 if (fs->state != idle && fs->state != available) {
791 ++fs->wanted; 842 ++fs->wanted;
792 while (fs->state != available) { 843 while (fs->state != available) {
844 spin_unlock_irqrestore(&swim3_lock, flags);
793 if (interruptible && signal_pending(current)) { 845 if (interruptible && signal_pending(current)) {
794 --fs->wanted; 846 --fs->wanted;
795 spin_unlock_irqrestore(&fs->lock, flags);
796 return -EINTR; 847 return -EINTR;
797 } 848 }
798 interruptible_sleep_on(&fs->wait); 849 interruptible_sleep_on(&fs->wait);
850 spin_lock_irqsave(&swim3_lock, flags);
799 } 851 }
800 --fs->wanted; 852 --fs->wanted;
801 } 853 }
802 fs->state = state; 854 fs->state = state;
803 spin_unlock_irqrestore(&fs->lock, flags); 855 spin_unlock_irqrestore(&swim3_lock, flags);
856
804 return 0; 857 return 0;
805} 858}
806 859
@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
808{ 861{
809 unsigned long flags; 862 unsigned long flags;
810 863
811 spin_lock_irqsave(&fs->lock, flags); 864 swim3_dbg("%s", "-> release drive\n");
865
866 spin_lock_irqsave(&swim3_lock, flags);
812 fs->state = idle; 867 fs->state = idle;
813 start_request(fs); 868 start_request(fs);
814 spin_unlock_irqrestore(&fs->lock, flags); 869 spin_unlock_irqrestore(&swim3_lock, flags);
815} 870}
816 871
817static int fd_eject(struct floppy_state *fs) 872static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
966{ 1021{
967 struct floppy_state *fs = disk->private_data; 1022 struct floppy_state *fs = disk->private_data;
968 struct swim3 __iomem *sw = fs->swim3; 1023 struct swim3 __iomem *sw = fs->swim3;
1024
969 mutex_lock(&swim3_mutex); 1025 mutex_lock(&swim3_mutex);
970 if (fs->ref_count > 0 && --fs->ref_count == 0) { 1026 if (fs->ref_count > 0 && --fs->ref_count == 0) {
971 swim3_action(fs, MOTOR_OFF); 1027 swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
1031 .revalidate_disk= floppy_revalidate, 1087 .revalidate_disk= floppy_revalidate,
1032}; 1088};
1033 1089
1090static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
1091{
1092 struct floppy_state *fs = macio_get_drvdata(mdev);
1093 struct swim3 __iomem *sw = fs->swim3;
1094
1095 if (!fs)
1096 return;
1097 if (mb_state != MB_FD)
1098 return;
1099
1100 /* Clear state */
1101 out_8(&sw->intr_enable, 0);
1102 in_8(&sw->intr);
1103 in_8(&sw->error);
1104}
1105
1034static int swim3_add_device(struct macio_dev *mdev, int index) 1106static int swim3_add_device(struct macio_dev *mdev, int index)
1035{ 1107{
1036 struct device_node *swim = mdev->ofdev.dev.of_node; 1108 struct device_node *swim = mdev->ofdev.dev.of_node;
1037 struct floppy_state *fs = &floppy_states[index]; 1109 struct floppy_state *fs = &floppy_states[index];
1038 int rc = -EBUSY; 1110 int rc = -EBUSY;
1039 1111
1112 /* Do this first for message macros */
1113 memset(fs, 0, sizeof(*fs));
1114 fs->mdev = mdev;
1115 fs->index = index;
1116
1040 /* Check & Request resources */ 1117 /* Check & Request resources */
1041 if (macio_resource_count(mdev) < 2) { 1118 if (macio_resource_count(mdev) < 2) {
1042 printk(KERN_WARNING "ifd%d: no address for %s\n", 1119 swim3_err("%s", "No address in device-tree\n");
1043 index, swim->full_name);
1044 return -ENXIO; 1120 return -ENXIO;
1045 } 1121 }
1046 if (macio_irq_count(mdev) < 2) { 1122 if (macio_irq_count(mdev) < 1) {
1047 printk(KERN_WARNING "fd%d: no intrs for device %s\n", 1123 swim3_err("%s", "No interrupt in device-tree\n");
1048 index, swim->full_name); 1124 return -ENXIO;
1049 } 1125 }
1050 if (macio_request_resource(mdev, 0, "swim3 (mmio)")) { 1126 if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
1051 printk(KERN_ERR "fd%d: can't request mmio resource for %s\n", 1127 swim3_err("%s", "Can't request mmio resource\n");
1052 index, swim->full_name);
1053 return -EBUSY; 1128 return -EBUSY;
1054 } 1129 }
1055 if (macio_request_resource(mdev, 1, "swim3 (dma)")) { 1130 if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
1056 printk(KERN_ERR "fd%d: can't request dma resource for %s\n", 1131 swim3_err("%s", "Can't request dma resource\n");
1057 index, swim->full_name);
1058 macio_release_resource(mdev, 0); 1132 macio_release_resource(mdev, 0);
1059 return -EBUSY; 1133 return -EBUSY;
1060 } 1134 }
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
1063 if (mdev->media_bay == NULL) 1137 if (mdev->media_bay == NULL)
1064 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1); 1138 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
1065 1139
1066 memset(fs, 0, sizeof(*fs));
1067 spin_lock_init(&fs->lock);
1068 fs->state = idle; 1140 fs->state = idle;
1069 fs->swim3 = (struct swim3 __iomem *) 1141 fs->swim3 = (struct swim3 __iomem *)
1070 ioremap(macio_resource_start(mdev, 0), 0x200); 1142 ioremap(macio_resource_start(mdev, 0), 0x200);
1071 if (fs->swim3 == NULL) { 1143 if (fs->swim3 == NULL) {
1072 printk("fd%d: couldn't map registers for %s\n", 1144 swim3_err("%s", "Couldn't map mmio registers\n");
1073 index, swim->full_name);
1074 rc = -ENOMEM; 1145 rc = -ENOMEM;
1075 goto out_release; 1146 goto out_release;
1076 } 1147 }
1077 fs->dma = (struct dbdma_regs __iomem *) 1148 fs->dma = (struct dbdma_regs __iomem *)
1078 ioremap(macio_resource_start(mdev, 1), 0x200); 1149 ioremap(macio_resource_start(mdev, 1), 0x200);
1079 if (fs->dma == NULL) { 1150 if (fs->dma == NULL) {
1080 printk("fd%d: couldn't map DMA for %s\n", 1151 swim3_err("%s", "Couldn't map dma registers\n");
1081 index, swim->full_name);
1082 iounmap(fs->swim3); 1152 iounmap(fs->swim3);
1083 rc = -ENOMEM; 1153 rc = -ENOMEM;
1084 goto out_release; 1154 goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
1090 fs->secpercyl = 36; 1160 fs->secpercyl = 36;
1091 fs->secpertrack = 18; 1161 fs->secpertrack = 18;
1092 fs->total_secs = 2880; 1162 fs->total_secs = 2880;
1093 fs->mdev = mdev;
1094 init_waitqueue_head(&fs->wait); 1163 init_waitqueue_head(&fs->wait);
1095 1164
1096 fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space); 1165 fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
1097 memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd)); 1166 memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
1098 st_le16(&fs->dma_cmd[1].command, DBDMA_STOP); 1167 st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
1099 1168
1169 if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
1170 swim3_mb_event(mdev, MB_FD);
1171
1100 if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) { 1172 if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
1101 printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n", 1173 swim3_err("%s", "Couldn't request interrupt\n");
1102 index, fs->swim3_intr, swim->full_name);
1103 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0); 1174 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
1104 goto out_unmap; 1175 goto out_unmap;
1105 return -EBUSY; 1176 return -EBUSY;
1106 } 1177 }
1107/*
1108 if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
1109 printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
1110 fs->dma_intr);
1111 return -EBUSY;
1112 }
1113*/
1114 1178
1115 init_timer(&fs->timeout); 1179 init_timer(&fs->timeout);
1116 1180
1117 printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count, 1181 swim3_info("SWIM3 floppy controller %s\n",
1118 mdev->media_bay ? "in media bay" : ""); 1182 mdev->media_bay ? "in media bay" : "");
1119 1183
1120 return 0; 1184 return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
1132 1196
1133static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match) 1197static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
1134{ 1198{
1135 int i, rc;
1136 struct gendisk *disk; 1199 struct gendisk *disk;
1200 int index, rc;
1201
1202 index = floppy_count++;
1203 if (index >= MAX_FLOPPIES)
1204 return -ENXIO;
1137 1205
1138 /* Add the drive */ 1206 /* Add the drive */
1139 rc = swim3_add_device(mdev, floppy_count); 1207 rc = swim3_add_device(mdev, index);
1140 if (rc) 1208 if (rc)
1141 return rc; 1209 return rc;
1210 /* Now register that disk. Same comment about failure handling */
1211 disk = disks[index] = alloc_disk(1);
1212 if (disk == NULL)
1213 return -ENOMEM;
1214 disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
1215 if (disk->queue == NULL) {
1216 put_disk(disk);
1217 return -ENOMEM;
1218 }
1219 disk->queue->queuedata = &floppy_states[index];
1142 1220
1143 /* Now create the queue if not there yet */ 1221 if (index == 0) {
1144 if (swim3_queue == NULL) {
1145 /* If we failed, there isn't much we can do as the driver is still 1222 /* If we failed, there isn't much we can do as the driver is still
1146 * too dumb to remove the device, just bail out 1223 * too dumb to remove the device, just bail out
1147 */ 1224 */
1148 if (register_blkdev(FLOPPY_MAJOR, "fd")) 1225 if (register_blkdev(FLOPPY_MAJOR, "fd"))
1149 return 0; 1226 return 0;
1150 swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
1151 if (swim3_queue == NULL) {
1152 unregister_blkdev(FLOPPY_MAJOR, "fd");
1153 return 0;
1154 }
1155 } 1227 }
1156 1228
1157 /* Now register that disk. Same comment about failure handling */
1158 i = floppy_count++;
1159 disk = disks[i] = alloc_disk(1);
1160 if (disk == NULL)
1161 return 0;
1162
1163 disk->major = FLOPPY_MAJOR; 1229 disk->major = FLOPPY_MAJOR;
1164 disk->first_minor = i; 1230 disk->first_minor = index;
1165 disk->fops = &floppy_fops; 1231 disk->fops = &floppy_fops;
1166 disk->private_data = &floppy_states[i]; 1232 disk->private_data = &floppy_states[index];
1167 disk->queue = swim3_queue;
1168 disk->flags |= GENHD_FL_REMOVABLE; 1233 disk->flags |= GENHD_FL_REMOVABLE;
1169 sprintf(disk->disk_name, "fd%d", i); 1234 sprintf(disk->disk_name, "fd%d", index);
1170 set_capacity(disk, 2880); 1235 set_capacity(disk, 2880);
1171 add_disk(disk); 1236 add_disk(disk);
1172 1237
@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
1194 .of_match_table = swim3_match, 1259 .of_match_table = swim3_match,
1195 }, 1260 },
1196 .probe = swim3_attach, 1261 .probe = swim3_attach,
1262#ifdef CONFIG_PMAC_MEDIABAY
1263 .mediabay_event = swim3_mb_event,
1264#endif
1197#if 0 1265#if 0
1198 .suspend = swim3_suspend, 1266 .suspend = swim3_suspend,
1199 .resume = swim3_resume, 1267 .resume = swim3_resume,
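
Note on the swim3 hunks above: swim3_attach() now reserves the drive index up front, bounds-checks it against MAX_FLOPPIES, and gives every disk its own request queue built on the shared swim3_lock. A minimal userspace sketch of just the slot-reservation idea, assuming a fixed-size state array; attach_one() and the printed messages are illustrative stand-ins, not driver API:

    #include <stdio.h>

    #define MAX_FLOPPIES 2

    static int floppy_count;

    /* Stand-in for swim3_attach(): claim the next slot first, refuse
     * further devices once the fixed-size state array is exhausted. */
    static int attach_one(void)
    {
        int index = floppy_count++;

        if (index >= MAX_FLOPPIES)
            return -1;              /* the driver returns -ENXIO here */
        printf("registered fd%d\n", index);
        return 0;
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++)
            if (attach_one())
                printf("attach %d rejected: no free slot\n", i);
        return 0;
    }
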
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11b41fd40c27..5ccf142ef0b8 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -188,7 +188,7 @@ config BT_MRVL
188 The core driver to support Marvell Bluetooth devices. 188 The core driver to support Marvell Bluetooth devices.
189 189
190 This driver is required if you want to support 190 This driver is required if you want to support
191 Marvell Bluetooth devices, such as 8688/8787. 191 Marvell Bluetooth devices, such as 8688/8787/8797.
192 192
193 Say Y here to compile Marvell Bluetooth driver 193 Say Y here to compile Marvell Bluetooth driver
194 into the kernel or say M to compile it as module. 194 into the kernel or say M to compile it as module.
@@ -201,8 +201,8 @@ config BT_MRVL_SDIO
201 The driver for Marvell Bluetooth chipsets with SDIO interface. 201 The driver for Marvell Bluetooth chipsets with SDIO interface.
202 202
203 This driver is required if you want to use Marvell Bluetooth 203 This driver is required if you want to use Marvell Bluetooth
204 devices with SDIO interface. Currently SD8688/SD8787 chipsets are 204 devices with SDIO interface. Currently SD8688/SD8787/SD8797
205 supported. 205 chipsets are supported.
206 206
207 Say Y here to compile support for Marvell BT-over-SDIO driver 207 Say Y here to compile support for Marvell BT-over-SDIO driver
208 into the kernel or say M to compile it as module. 208 into the kernel or say M to compile it as module.
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 9ef48167e2cf..27b74b0d547b 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -65,7 +65,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
65 .io_port_1 = 0x01, 65 .io_port_1 = 0x01,
66 .io_port_2 = 0x02, 66 .io_port_2 = 0x02,
67}; 67};
68static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = { 68static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
69 .cfg = 0x00, 69 .cfg = 0x00,
70 .host_int_mask = 0x02, 70 .host_int_mask = 0x02,
71 .host_intstatus = 0x03, 71 .host_intstatus = 0x03,
@@ -92,7 +92,14 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
92static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = { 92static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
93 .helper = NULL, 93 .helper = NULL,
94 .firmware = "mrvl/sd8787_uapsta.bin", 94 .firmware = "mrvl/sd8787_uapsta.bin",
95 .reg = &btmrvl_reg_8787, 95 .reg = &btmrvl_reg_87xx,
96 .sd_blksz_fw_dl = 256,
97};
98
99static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
100 .helper = NULL,
101 .firmware = "mrvl/sd8797_uapsta.bin",
102 .reg = &btmrvl_reg_87xx,
96 .sd_blksz_fw_dl = 256, 103 .sd_blksz_fw_dl = 256,
97}; 104};
98 105
@@ -103,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
103 /* Marvell SD8787 Bluetooth device */ 110 /* Marvell SD8787 Bluetooth device */
104 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A), 111 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
105 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, 112 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
113 /* Marvell SD8797 Bluetooth device */
114 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
115 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
106 116
107 { } /* Terminating entry */ 117 { } /* Terminating entry */
108}; 118};
@@ -1076,3 +1086,4 @@ MODULE_LICENSE("GPL v2");
1076MODULE_FIRMWARE("sd8688_helper.bin"); 1086MODULE_FIRMWARE("sd8688_helper.bin");
1077MODULE_FIRMWARE("sd8688.bin"); 1087MODULE_FIRMWARE("sd8688.bin");
1078MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); 1088MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
1089MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
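
The SD8797 support above reuses the SD8787 register layout by pointing both sdio_device_id entries' driver_data at the shared btmrvl_reg_87xx descriptor. A small standalone sketch of that table-plus-shared-descriptor pattern, with plain C structs standing in for the SDIO core types (the 0x02df vendor ID and the 0x911A/0x912A device IDs come from the hunk; everything else is illustrative):

    #include <stdio.h>

    struct chip_regs { unsigned int cfg, host_int_mask; };

    /* One register layout shared by both newer chips, as btmrvl_reg_87xx is. */
    static const struct chip_regs regs_87xx = { .cfg = 0x00, .host_int_mask = 0x02 };

    struct dev_id {
        unsigned short vendor, device;
        const struct chip_regs *data;       /* plays the role of .driver_data */
    };

    static const struct dev_id id_table[] = {
        { 0x02df, 0x911A, &regs_87xx },     /* SD8787 */
        { 0x02df, 0x912A, &regs_87xx },     /* SD8797 */
        { 0 }                               /* terminating entry */
    };

    static const struct chip_regs *lookup(unsigned short v, unsigned short d)
    {
        for (const struct dev_id *id = id_table; id->vendor; id++)
            if (id->vendor == v && id->device == d)
                return id->data;
        return NULL;
    }

    int main(void)
    {
        const struct chip_regs *r = lookup(0x02df, 0x912A);

        printf("found: %s\n", r ? "yes, shared 87xx register map" : "no");
        return 0;
    }
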
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f9b726091ad0..eabc437ce500 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -100,6 +100,9 @@ static struct usb_device_id btusb_table[] = {
100 /* Canyon CN-BTU1 with HID interfaces */ 100 /* Canyon CN-BTU1 with HID interfaces */
101 { USB_DEVICE(0x0c10, 0x0000) }, 101 { USB_DEVICE(0x0c10, 0x0000) },
102 102
103 /* Broadcom BCM20702A0 */
104 { USB_DEVICE(0x413c, 0x8197) },
105
103 { } /* Terminating entry */ 106 { } /* Terminating entry */
104}; 107};
105 108
@@ -774,9 +777,8 @@ skip_waking:
774 usb_mark_last_busy(data->udev); 777 usb_mark_last_busy(data->udev);
775 } 778 }
776 779
777 usb_free_urb(urb);
778
779done: 780done:
781 usb_free_urb(urb);
780 return err; 782 return err;
781} 783}
782 784
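
The btusb change moves usb_free_urb() below the done: label so the early "goto done" path releases the URB as well as the fall-through path. A tiny runnable sketch of that single-exit cleanup pattern, with malloc/free standing in for the URB helpers:

    #include <stdlib.h>

    /* The buffer is released at the single exit label, so the early
     * "goto done" path no longer leaks it; before the fix the free sat
     * above the label and the goto skipped it. */
    static int submit(int fail_early)
    {
        int err = 0;
        void *urb = malloc(64);     /* stands in for usb_alloc_urb() */

        if (!urb)
            return -1;
        if (fail_early) {
            err = -1;
            goto done;
        }
        /* ... hand the buffer to the hardware here ... */
    done:
        free(urb);                  /* stands in for usb_free_urb() */
        return err;
    }

    int main(void)
    {
        submit(1);                  /* early-exit path, buffer still freed */
        submit(0);                  /* normal path */
        return 0;
    }
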
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 66cd0b8096ca..c92424ca1a55 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1186,10 +1186,11 @@ static void gen6_cleanup(void)
1186/* Certain Gen5 chipsets require require idling the GPU before 1186/* Certain Gen5 chipsets require require idling the GPU before
1187 * unmapping anything from the GTT when VT-d is enabled. 1187 * unmapping anything from the GTT when VT-d is enabled.
1188 */ 1188 */
1189extern int intel_iommu_gfx_mapped;
1190static inline int needs_idle_maps(void) 1189static inline int needs_idle_maps(void)
1191{ 1190{
1191#ifdef CONFIG_INTEL_IOMMU
1192 const unsigned short gpu_devid = intel_private.pcidev->device; 1192 const unsigned short gpu_devid = intel_private.pcidev->device;
1193 extern int intel_iommu_gfx_mapped;
1193 1194
1194 /* Query intel_iommu to see if we need the workaround. Presumably that 1195 /* Query intel_iommu to see if we need the workaround. Presumably that
1195 * was loaded first. 1196 * was loaded first.
@@ -1198,7 +1199,7 @@ static inline int needs_idle_maps(void)
1198 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && 1199 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
1199 intel_iommu_gfx_mapped) 1200 intel_iommu_gfx_mapped)
1200 return 1; 1201 return 1;
1201 1202#endif
1202 return 0; 1203 return 0;
1203} 1204}
1204 1205
@@ -1236,7 +1237,7 @@ static int i9xx_setup(void)
1236 intel_private.gtt_bus_addr = reg_addr + gtt_offset; 1237 intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1237 } 1238 }
1238 1239
1239 if (needs_idle_maps()); 1240 if (needs_idle_maps())
1240 intel_private.base.do_idle_maps = 1; 1241 intel_private.base.do_idle_maps = 1;
1241 1242
1242 intel_i9xx_setup_flush(); 1243 intel_i9xx_setup_flush();
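
The i9xx_setup() hunk fixes a classic stray-semicolon bug: in "if (needs_idle_maps());" the trailing semicolon is the entire if-body, so do_idle_maps was set unconditionally. A short standalone demonstration of both forms (the buggy one is reproduced deliberately and will draw an empty-body warning from most compilers):

    #include <stdio.h>

    static int needs_idle_maps(void) { return 0; }

    int main(void)
    {
        int do_idle_maps = 0;

        /* Buggy form from the old code: the trailing ';' is the whole
         * if-body, so the assignment below always runs. */
        if (needs_idle_maps());
            do_idle_maps = 1;
        printf("buggy form: %d\n", do_idle_maps);   /* 1, despite the check returning 0 */

        /* Fixed form from the hunk. */
        do_idle_maps = 0;
        if (needs_idle_maps())
            do_idle_maps = 1;
        printf("fixed form: %d\n", do_idle_maps);   /* 0 */
        return 0;
    }
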
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 63e19ba56bbe..6035ab8d5ef7 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -941,7 +941,7 @@ void get_random_bytes(void *buf, int nbytes)
941 if (!arch_get_random_long(&v)) 941 if (!arch_get_random_long(&v))
942 break; 942 break;
943 943
944 memcpy(buf, &v, chunk); 944 memcpy(p, &v, chunk);
945 p += chunk; 945 p += chunk;
946 nbytes -= chunk; 946 nbytes -= chunk;
947 } 947 }
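
The get_random_bytes() fix copies each chunk to the moving cursor p rather than to the start of buf. A minimal sketch of the corrected loop, with a fixed constant standing in for arch_get_random_long():

    #include <stdio.h>
    #include <string.h>

    /* Each chunk must land at the moving cursor p, not at the start of
     * buf; the old code overwrote the first chunk on every pass and left
     * the tail of the buffer untouched. */
    static void fill(unsigned char *buf, int nbytes)
    {
        unsigned char *p = buf;
        unsigned long v = 0x12345678UL;     /* stands in for arch_get_random_long() */

        while (nbytes) {
            int chunk = nbytes < (int)sizeof(v) ? nbytes : (int)sizeof(v);

            memcpy(p, &v, chunk);           /* was memcpy(buf, &v, chunk) */
            p += chunk;
            nbytes -= chunk;
        }
    }

    int main(void)
    {
        unsigned char out[10] = { 0 };

        fill(out, (int)sizeof(out));
        for (size_t i = 0; i < sizeof(out); i++)
            printf("%02x", out[i]);
        printf("\n");
        return 0;
    }
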
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
index edaa987621ea..f5002015d82e 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/db8500-cpufreq.c
@@ -109,7 +109,7 @@ static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
109 109
110static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy) 110static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
111{ 111{
112 int res; 112 int i, res;
113 113
114 BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table)); 114 BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table));
115 115
@@ -120,8 +120,8 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
120 freq_table[3].frequency = 1000000; 120 freq_table[3].frequency = 1000000;
121 } 121 }
122 pr_info("db8500-cpufreq : Available frequencies:\n"); 122 pr_info("db8500-cpufreq : Available frequencies:\n");
123 while (freq_table[i].frequency != CPUFREQ_TABLE_END) 123 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
124 pr_info(" %d Mhz\n", freq_table[i++].frequency/1000); 124 pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
125 125
126 /* get policy fields based on the table */ 126 /* get policy fields based on the table */
127 res = cpufreq_frequency_table_cpuinfo(policy, freq_table); 127 res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
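
The db8500-cpufreq fix declares and initialises the index and walks the CPUFREQ_TABLE_END-terminated table with a plain for loop instead of reading an uninitialised i. A sketch of that sentinel-terminated iteration (TABLE_END and the frequency values here are illustrative):

    #include <stdio.h>

    #define TABLE_END (~0u)     /* stands in for CPUFREQ_TABLE_END */

    struct freq_entry { unsigned int frequency; };      /* kHz */

    static const struct freq_entry freq_table[] = {
        { 300000 }, { 600000 }, { 800000 }, { 1000000 }, { TABLE_END },
    };

    int main(void)
    {
        /* Declare and initialise the index and stop at the sentinel; the
         * old loop read an uninitialised i. */
        for (int i = 0; freq_table[i].frequency != TABLE_END; i++)
            printf(" %u MHz\n", freq_table[i].frequency / 1000);
        return 0;
    }
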
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 5c6f56f21443..dcd8babae9eb 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -343,11 +343,13 @@ static void mv_process_hash_current(int first_block)
343 else 343 else
344 op.config |= CFG_MID_FRAG; 344 op.config |= CFG_MID_FRAG;
345 345
346 writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); 346 if (first_block) {
347 writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); 347 writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
348 writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); 348 writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
349 writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); 349 writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
350 writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); 350 writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
351 writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
352 }
351 } 353 }
352 354
353 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); 355 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 643b055ed3cd..8f0491037080 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -1,36 +1,29 @@
1config ARCH_HAS_DEVFREQ
2 bool
3 depends on ARCH_HAS_OPP
4 help
5 Denotes that the architecture supports DEVFREQ. If the architecture
6 supports multiple OPP entries per device and the frequency of the
7 devices with OPPs may be altered dynamically, the architecture
8 supports DEVFREQ.
9
10menuconfig PM_DEVFREQ 1menuconfig PM_DEVFREQ
11 bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support" 2 bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
12 depends on PM_OPP && ARCH_HAS_DEVFREQ
13 help 3 help
14 With OPP support, a device may have a list of frequencies and 4 A device may have a list of frequencies and voltages available.
15 voltages available. DEVFREQ, a generic DVFS framework can be 5 devfreq, a generic DVFS framework can be registered for a device
16 registered for a device with OPP support in order to let the 6 in order to let the governor provided to devfreq choose an
17 governor provided to DEVFREQ choose an operating frequency 7 operating frequency based on the device driver's policy.
18 based on the OPP's list and the policy given with DEVFREQ.
19 8
20 Each device may have its own governor and policy. DEVFREQ can 9 Each device may have its own governor and policy. Devfreq can
21 reevaluate the device state periodically and/or based on the 10 reevaluate the device state periodically and/or based on the
22 OPP list changes (each frequency/voltage pair in OPP may be 11 notification to "nb", a notifier block, of devfreq.
23 disabled or enabled).
24 12
25 Like some CPUs with CPUFREQ, a device may have multiple clocks. 13 Like some CPUs with CPUfreq, a device may have multiple clocks.
26 However, because the clock frequencies of a single device are 14 However, because the clock frequencies of a single device are
27 determined by the single device's state, an instance of DEVFREQ 15 determined by the single device's state, an instance of devfreq
28 is attached to a single device and returns a "representative" 16 is attached to a single device and returns a "representative"
29 clock frequency from the OPP of the device, which is also attached 17 clock frequency of the device, which is also attached
30 to a device by 1-to-1. The device registering DEVFREQ takes the 18 to a device by 1-to-1. The device registering devfreq takes the
31 responsiblity to "interpret" the frequency listed in OPP and 19 responsiblity to "interpret" the representative frequency and
32 to set its every clock accordingly with the "target" callback 20 to set its every clock accordingly with the "target" callback
33 given to DEVFREQ. 21 given to devfreq.
22
23 When OPP is used with the devfreq device, it is recommended to
24 register devfreq's nb to the OPP's notifier head. If OPP is
25 used with the devfreq device, you may use OPP helper
26 functions defined in devfreq.h.
34 27
35if PM_DEVFREQ 28if PM_DEVFREQ
36 29
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 5d15b812377b..59d24e9cb8c5 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -15,7 +15,9 @@
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/module.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/stat.h>
19#include <linux/opp.h> 21#include <linux/opp.h>
20#include <linux/devfreq.h> 22#include <linux/devfreq.h>
21#include <linux/workqueue.h> 23#include <linux/workqueue.h>
@@ -416,10 +418,14 @@ out:
416 */ 418 */
417int devfreq_remove_device(struct devfreq *devfreq) 419int devfreq_remove_device(struct devfreq *devfreq)
418{ 420{
421 bool central_polling;
422
419 if (!devfreq) 423 if (!devfreq)
420 return -EINVAL; 424 return -EINVAL;
421 425
422 if (!devfreq->governor->no_central_polling) { 426 central_polling = !devfreq->governor->no_central_polling;
427
428 if (central_polling) {
423 mutex_lock(&devfreq_list_lock); 429 mutex_lock(&devfreq_list_lock);
424 while (wait_remove_device == devfreq) { 430 while (wait_remove_device == devfreq) {
425 mutex_unlock(&devfreq_list_lock); 431 mutex_unlock(&devfreq_list_lock);
@@ -431,7 +437,7 @@ int devfreq_remove_device(struct devfreq *devfreq)
431 mutex_lock(&devfreq->lock); 437 mutex_lock(&devfreq->lock);
432 _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */ 438 _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
433 439
434 if (!devfreq->governor->no_central_polling) 440 if (central_polling)
435 mutex_unlock(&devfreq_list_lock); 441 mutex_unlock(&devfreq_list_lock);
436 442
437 return 0; 443 return 0;
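
devfreq_remove_device() now latches the governor's polling flag into a local before _remove_devfreq() tears the object down, so the flag is never read back from freed memory. A compact userspace sketch of the cache-before-free pattern, with free() standing in for _remove_devfreq():

    #include <stdlib.h>
    #include <stdbool.h>

    struct governor { bool no_central_polling; };
    struct devfreq  { const struct governor *gov; };

    /* The governor flag is read once into a local before the object is
     * torn down; only the cached copy is consulted afterwards. */
    static int remove_device(struct devfreq *df)
    {
        bool central_polling;

        if (!df)
            return -1;
        central_polling = !df->gov->no_central_polling;

        if (central_polling) {
            /* take the global list lock, wait out concurrent removal ... */
        }

        free(df);                   /* stands in for _remove_devfreq() freeing df */

        if (central_polling) {      /* safe: does not touch the freed object */
            /* drop the global list lock */
        }
        return 0;
    }

    int main(void)
    {
        static const struct governor simple = { .no_central_polling = false };
        struct devfreq *df = malloc(sizeof(*df));

        if (!df)
            return 1;
        df->gov = &simple;
        return remove_device(df) ? 1 : 0;
    }
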
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 8af8e864a9cf..73464a62adf7 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1128,7 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
1128 { .compatible = "fsl,p1020-memory-controller", }, 1128 { .compatible = "fsl,p1020-memory-controller", },
1129 { .compatible = "fsl,p1021-memory-controller", }, 1129 { .compatible = "fsl,p1021-memory-controller", },
1130 { .compatible = "fsl,p2020-memory-controller", }, 1130 { .compatible = "fsl,p2020-memory-controller", },
1131 { .compatible = "fsl,p4080-memory-controller", }, 1131 { .compatible = "fsl,qoriq-memory-controller", },
1132 {}, 1132 {},
1133}; 1133};
1134MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match); 1134MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index bcb1126e3d00..153980be4ee6 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -585,14 +585,12 @@ int dmi_name_in_serial(const char *str)
585} 585}
586 586
587/** 587/**
588 * dmi_name_in_vendors - Check if string is anywhere in the DMI vendor information. 588 * dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
589 * @str: Case sensitive Name 589 * @str: Case sensitive Name
590 */ 590 */
591int dmi_name_in_vendors(const char *str) 591int dmi_name_in_vendors(const char *str)
592{ 592{
593 static int fields[] = { DMI_BIOS_VENDOR, DMI_BIOS_VERSION, DMI_SYS_VENDOR, 593 static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE };
594 DMI_PRODUCT_NAME, DMI_PRODUCT_VERSION, DMI_BOARD_VENDOR,
595 DMI_BOARD_NAME, DMI_BOARD_VERSION, DMI_NONE };
596 int i; 594 int i;
597 for (i = 0; fields[i] != DMI_NONE; i++) { 595 for (i = 0; fields[i] != DMI_NONE; i++) {
598 int f = fields[i]; 596 int f = fields[i];
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 8370f72d87ff..b0a81173a268 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -457,7 +457,8 @@ static int efi_pstore_close(struct pstore_info *psi)
457} 457}
458 458
459static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, 459static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
460 struct timespec *timespec, struct pstore_info *psi) 460 struct timespec *timespec,
461 char **buf, struct pstore_info *psi)
461{ 462{
462 efi_guid_t vendor = LINUX_EFI_CRASH_GUID; 463 efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
463 struct efivars *efivars = psi->data; 464 struct efivars *efivars = psi->data;
@@ -478,7 +479,11 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
478 timespec->tv_nsec = 0; 479 timespec->tv_nsec = 0;
479 get_var_data_locked(efivars, &efivars->walk_entry->var); 480 get_var_data_locked(efivars, &efivars->walk_entry->var);
480 size = efivars->walk_entry->var.DataSize; 481 size = efivars->walk_entry->var.DataSize;
481 memcpy(psi->buf, efivars->walk_entry->var.Data, size); 482 *buf = kmalloc(size, GFP_KERNEL);
483 if (*buf == NULL)
484 return -ENOMEM;
485 memcpy(*buf, efivars->walk_entry->var.Data,
486 size);
482 efivars->walk_entry = list_entry(efivars->walk_entry->list.next, 487 efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
483 struct efivar_entry, list); 488 struct efivar_entry, list);
484 return size; 489 return size;
@@ -576,7 +581,8 @@ static int efi_pstore_close(struct pstore_info *psi)
576} 581}
577 582
578static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, 583static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
579 struct timespec *time, struct pstore_info *psi) 584 struct timespec *timespec,
585 char **buf, struct pstore_info *psi)
580{ 586{
581 return -1; 587 return -1;
582} 588}
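
The efi_pstore_read() change follows the new pstore read contract: the backend allocates the record buffer itself and returns it through the added char **buf argument instead of copying into the preallocated psi->buf. A sketch of that contract, assuming the caller owns and frees the returned buffer; all names are illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* The backend allocates the record buffer and hands it back through
     * **buf; the caller owns it and frees it after use. */
    static long read_record(char **buf)
    {
        static const char record[] = "crash record";
        size_t size = sizeof(record);

        *buf = malloc(size);
        if (*buf == NULL)
            return -1;              /* -ENOMEM in the driver */
        memcpy(*buf, record, size);
        return (long)size;
    }

    int main(void)
    {
        char *buf = NULL;
        long n = read_record(&buf);

        if (n > 0) {
            printf("%ld bytes: %s\n", n, buf);
            free(buf);
        }
        return 0;
    }
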
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c811cb107904..2cce44a1d7d0 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
746 ibft_cleanup(); 746 ibft_cleanup();
747} 747}
748 748
749#ifdef CONFIG_ACPI
750static const struct {
751 char *sign;
752} ibft_signs[] = {
753 /*
754 * One spec says "IBFT", the other says "iBFT". We have to check
755 * for both.
756 */
757 { ACPI_SIG_IBFT },
758 { "iBFT" },
759};
760
761static void __init acpi_find_ibft_region(void)
762{
763 int i;
764 struct acpi_table_header *table = NULL;
765
766 if (acpi_disabled)
767 return;
768
769 for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
770 acpi_get_table(ibft_signs[i].sign, 0, &table);
771 ibft_addr = (struct acpi_table_ibft *)table;
772 }
773}
774#else
775static void __init acpi_find_ibft_region(void)
776{
777}
778#endif
779
749/* 780/*
750 * ibft_init() - creates sysfs tree entries for the iBFT data. 781 * ibft_init() - creates sysfs tree entries for the iBFT data.
751 */ 782 */
@@ -753,9 +784,16 @@ static int __init ibft_init(void)
753{ 784{
754 int rc = 0; 785 int rc = 0;
755 786
787 /*
788 As on UEFI systems the setup_arch()/find_ibft_region()
789 is called before ACPI tables are parsed and it only does
790 legacy finding.
791 */
792 if (!ibft_addr)
793 acpi_find_ibft_region();
794
756 if (ibft_addr) { 795 if (ibft_addr) {
757 printk(KERN_INFO "iBFT detected at 0x%llx.\n", 796 pr_info("iBFT detected.\n");
758 (u64)isa_virt_to_bus(ibft_addr));
759 797
760 rc = ibft_check_device(); 798 rc = ibft_check_device();
761 if (rc) 799 if (rc)
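
The iBFT change defers the ACPI lookup to ibft_init(), since on UEFI systems find_ibft_region() runs before the ACPI tables are parsed; both the "IBFT" and "iBFT" signatures are tried until one hits. A sketch of that probe loop, with acpi_lookup() as a hypothetical stand-in for acpi_get_table():

    #include <stdio.h>
    #include <string.h>

    /* Try each candidate signature in turn until one lookup succeeds. */
    static const char *ibft_signs[] = { "IBFT", "iBFT" };

    static const void *acpi_lookup(const char *sign)
    {
        return strcmp(sign, "iBFT") == 0 ? "fake table" : NULL;
    }

    int main(void)
    {
        const void *ibft_addr = NULL;

        for (size_t i = 0; i < sizeof(ibft_signs) / sizeof(ibft_signs[0]) && !ibft_addr; i++)
            ibft_addr = acpi_lookup(ibft_signs[i]);

        printf("iBFT %s\n", ibft_addr ? "detected" : "not found");
        return 0;
    }
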
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index bfe723266fd8..4da4eb9ae926 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
45static const struct { 45static const struct {
46 char *sign; 46 char *sign;
47} ibft_signs[] = { 47} ibft_signs[] = {
48#ifdef CONFIG_ACPI
49 /*
50 * One spec says "IBFT", the other says "iBFT". We have to check
51 * for both.
52 */
53 { ACPI_SIG_IBFT },
54#endif
55 { "iBFT" }, 48 { "iBFT" },
56 { "BIFT" }, /* Broadcom iSCSI Offload */ 49 { "BIFT" }, /* Broadcom iSCSI Offload */
57}; 50};
@@ -62,14 +55,6 @@ static const struct {
62#define VGA_MEM 0xA0000 /* VGA buffer */ 55#define VGA_MEM 0xA0000 /* VGA buffer */
63#define VGA_SIZE 0x20000 /* 128kB */ 56#define VGA_SIZE 0x20000 /* 128kB */
64 57
65#ifdef CONFIG_ACPI
66static int __init acpi_find_ibft(struct acpi_table_header *header)
67{
68 ibft_addr = (struct acpi_table_ibft *)header;
69 return 0;
70}
71#endif /* CONFIG_ACPI */
72
73static int __init find_ibft_in_mem(void) 58static int __init find_ibft_in_mem(void)
74{ 59{
75 unsigned long pos; 60 unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
94 * the table cannot be valid. */ 79 * the table cannot be valid. */
95 if (pos + len <= (IBFT_END-1)) { 80 if (pos + len <= (IBFT_END-1)) {
96 ibft_addr = (struct acpi_table_ibft *)virt; 81 ibft_addr = (struct acpi_table_ibft *)virt;
82 pr_info("iBFT found at 0x%lx.\n", pos);
97 goto done; 83 goto done;
98 } 84 }
99 } 85 }
@@ -108,20 +94,12 @@ done:
108 */ 94 */
109unsigned long __init find_ibft_region(unsigned long *sizep) 95unsigned long __init find_ibft_region(unsigned long *sizep)
110{ 96{
111#ifdef CONFIG_ACPI
112 int i;
113#endif
114 ibft_addr = NULL; 97 ibft_addr = NULL;
115 98
116#ifdef CONFIG_ACPI
117 for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
118 acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
119#endif /* CONFIG_ACPI */
120
121 /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will 99 /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
122 * only use ACPI for this */ 100 * only use ACPI for this */
123 101
124 if (!ibft_addr && !efi_enabled) 102 if (!efi_enabled)
125 find_ibft_in_mem(); 103 find_ibft_in_mem();
126 104
127 if (ibft_addr) { 105 if (ibft_addr) {
diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
index f10fc521951b..1eedb6f7fdab 100644
--- a/drivers/firmware/sigma.c
+++ b/drivers/firmware/sigma.c
@@ -14,13 +14,34 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/sigma.h> 15#include <linux/sigma.h>
16 16
17/* Return: 0==OK, <0==error, =1 ==no more actions */ 17static size_t sigma_action_size(struct sigma_action *sa)
18{
19 size_t payload = 0;
20
21 switch (sa->instr) {
22 case SIGMA_ACTION_WRITEXBYTES:
23 case SIGMA_ACTION_WRITESINGLE:
24 case SIGMA_ACTION_WRITESAFELOAD:
25 payload = sigma_action_len(sa);
26 break;
27 default:
28 break;
29 }
30
31 payload = ALIGN(payload, 2);
32
33 return payload + sizeof(struct sigma_action);
34}
35
36/*
37 * Returns a negative error value in case of an error, 0 if processing of
38 * the firmware should be stopped after this action, 1 otherwise.
39 */
18static int 40static int
19process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw) 41process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
20{ 42{
21 struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
22 size_t len = sigma_action_len(sa); 43 size_t len = sigma_action_len(sa);
23 int ret = 0; 44 int ret;
24 45
25 pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__, 46 pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
26 sa->instr, sa->addr, len); 47 sa->instr, sa->addr, len);
@@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
29 case SIGMA_ACTION_WRITEXBYTES: 50 case SIGMA_ACTION_WRITEXBYTES:
30 case SIGMA_ACTION_WRITESINGLE: 51 case SIGMA_ACTION_WRITESINGLE:
31 case SIGMA_ACTION_WRITESAFELOAD: 52 case SIGMA_ACTION_WRITESAFELOAD:
32 if (ssfw->fw->size < ssfw->pos + len)
33 return -EINVAL;
34 ret = i2c_master_send(client, (void *)&sa->addr, len); 53 ret = i2c_master_send(client, (void *)&sa->addr, len);
35 if (ret < 0) 54 if (ret < 0)
36 return -EINVAL; 55 return -EINVAL;
37 break; 56 break;
38
39 case SIGMA_ACTION_DELAY: 57 case SIGMA_ACTION_DELAY:
40 ret = 0;
41 udelay(len); 58 udelay(len);
42 len = 0; 59 len = 0;
43 break; 60 break;
44
45 case SIGMA_ACTION_END: 61 case SIGMA_ACTION_END:
46 return 1; 62 return 0;
47
48 default: 63 default:
49 return -EINVAL; 64 return -EINVAL;
50 } 65 }
51 66
52 /* when arrive here ret=0 or sent data */ 67 return 1;
53 ssfw->pos += sigma_action_size(sa, len);
54 return ssfw->pos == ssfw->fw->size;
55} 68}
56 69
57static int 70static int
58process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw) 71process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
59{ 72{
60 pr_debug("%s: processing %p\n", __func__, ssfw); 73 struct sigma_action *sa;
74 size_t size;
75 int ret;
76
77 while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
78 sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
79
80 size = sigma_action_size(sa);
81 ssfw->pos += size;
82 if (ssfw->pos > ssfw->fw->size || size == 0)
83 break;
84
85 ret = process_sigma_action(client, sa);
61 86
62 while (1) {
63 int ret = process_sigma_action(client, ssfw);
64 pr_debug("%s: action returned %i\n", __func__, ret); 87 pr_debug("%s: action returned %i\n", __func__, ret);
65 if (ret == 1) 88
66 return 0; 89 if (ret <= 0)
67 else if (ret)
68 return ret; 90 return ret;
69 } 91 }
92
93 if (ssfw->pos != ssfw->fw->size)
94 return -EINVAL;
95
96 return 0;
70} 97}
71 98
72int process_sigma_firmware(struct i2c_client *client, const char *name) 99int process_sigma_firmware(struct i2c_client *client, const char *name)
@@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name)
89 116
90 /* then verify the header */ 117 /* then verify the header */
91 ret = -EINVAL; 118 ret = -EINVAL;
92 if (fw->size < sizeof(*ssfw_head)) 119
120 /*
121 * Reject too small or unreasonable large files. The upper limit has been
122 * chosen a bit arbitrarily, but it should be enough for all practical
123 * purposes and having the limit makes it easier to avoid integer
124 * overflows later in the loading process.
125 */
126 if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
93 goto done; 127 goto done;
94 128
95 ssfw_head = (void *)fw->data; 129 ssfw_head = (void *)fw->data;
96 if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic))) 130 if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
97 goto done; 131 goto done;
98 132
99 crc = crc32(0, fw->data, fw->size); 133 crc = crc32(0, fw->data + sizeof(*ssfw_head),
134 fw->size - sizeof(*ssfw_head));
100 pr_debug("%s: crc=%x\n", __func__, crc); 135 pr_debug("%s: crc=%x\n", __func__, crc);
101 if (crc != ssfw_head->crc) 136 if (crc != le32_to_cpu(ssfw_head->crc))
102 goto done; 137 goto done;
103 138
104 ssfw.pos = sizeof(*ssfw_head); 139 ssfw.pos = sizeof(*ssfw_head);
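
The sigma firmware rework bounds-checks every action against the image size, advances by a size derived from the record itself, and rejects images whose cursor does not land exactly on the end. A simplified sketch of such a record walk; the two-byte header used here is illustrative, not the SigmaDSP layout:

    #include <stdio.h>
    #include <stdint.h>

    /* Every header must fit inside the image, the cursor advances by the
     * size derived from the record itself, and a cursor that overruns the
     * image aborts the parse; success requires landing exactly on the end. */
    struct action { uint8_t instr; uint8_t len; /* payload follows */ };

    static int process_image(const uint8_t *data, size_t size)
    {
        size_t pos = 0;

        while (pos + sizeof(struct action) <= size) {
            const struct action *sa = (const struct action *)(data + pos);

            pos += sizeof(*sa) + sa->len;   /* whole record, header + payload */
            if (pos > size)
                break;                      /* record claims more bytes than remain */
            printf("instr %d, %d payload bytes\n", sa->instr, sa->len);
        }
        return pos == size ? 0 : -1;        /* -EINVAL in the driver */
    }

    int main(void)
    {
        static const uint8_t image[] = { 1, 2, 0xaa, 0xbb, 2, 1, 0xcc };

        return process_image(image, sizeof(image));
    }
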
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index dbcb0bcfd8da..4e018d6a7639 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
18obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o 18obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
19obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o 19obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
20obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o 20obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
21obj-$(CONFIG_MACH_KS8695) += gpio-ks8695.o 21obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
22obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o 22obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
23obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o 23obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
24obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o 24obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 038f5eb8b13d..f8ce29ef9f88 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -22,7 +22,6 @@
22#include <linux/mfd/da9052/da9052.h> 22#include <linux/mfd/da9052/da9052.h>
23#include <linux/mfd/da9052/reg.h> 23#include <linux/mfd/da9052/reg.h>
24#include <linux/mfd/da9052/pdata.h> 24#include <linux/mfd/da9052/pdata.h>
25#include <linux/mfd/da9052/gpio.h>
26 25
27#define DA9052_INPUT 1 26#define DA9052_INPUT 1
28#define DA9052_OUTPUT_OPENDRAIN 2 27#define DA9052_OUTPUT_OPENDRAIN 2
@@ -43,6 +42,9 @@
43#define DA9052_GPIO_MASK_UPPER_NIBBLE 0xF0 42#define DA9052_GPIO_MASK_UPPER_NIBBLE 0xF0
44#define DA9052_GPIO_MASK_LOWER_NIBBLE 0x0F 43#define DA9052_GPIO_MASK_LOWER_NIBBLE 0x0F
45#define DA9052_GPIO_NIBBLE_SHIFT 4 44#define DA9052_GPIO_NIBBLE_SHIFT 4
45#define DA9052_IRQ_GPI0 16
46#define DA9052_GPIO_ODD_SHIFT 7
47#define DA9052_GPIO_EVEN_SHIFT 3
46 48
47struct da9052_gpio { 49struct da9052_gpio {
48 struct da9052 *da9052; 50 struct da9052 *da9052;
@@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
104static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value) 106static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
105{ 107{
106 struct da9052_gpio *gpio = to_da9052_gpio(gc); 108 struct da9052_gpio *gpio = to_da9052_gpio(gc);
107 unsigned char register_value = 0;
108 int ret; 109 int ret;
109 110
110 if (da9052_gpio_port_odd(offset)) { 111 if (da9052_gpio_port_odd(offset)) {
111 if (value) {
112 register_value = DA9052_GPIO_ODD_PORT_MODE;
113 ret = da9052_reg_update(gpio->da9052, (offset >> 1) + 112 ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
114 DA9052_GPIO_0_1_REG, 113 DA9052_GPIO_0_1_REG,
115 DA9052_GPIO_ODD_PORT_MODE, 114 DA9052_GPIO_ODD_PORT_MODE,
116 register_value); 115 value << DA9052_GPIO_ODD_SHIFT);
117 if (ret != 0) 116 if (ret != 0)
118 dev_err(gpio->da9052->dev, 117 dev_err(gpio->da9052->dev,
119 "Failed to updated gpio odd reg,%d", 118 "Failed to updated gpio odd reg,%d",
120 ret); 119 ret);
121 }
122 } else { 120 } else {
123 if (value) {
124 register_value = DA9052_GPIO_EVEN_PORT_MODE;
125 ret = da9052_reg_update(gpio->da9052, (offset >> 1) + 121 ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
126 DA9052_GPIO_0_1_REG, 122 DA9052_GPIO_0_1_REG,
127 DA9052_GPIO_EVEN_PORT_MODE, 123 DA9052_GPIO_EVEN_PORT_MODE,
128 register_value); 124 value << DA9052_GPIO_EVEN_SHIFT);
129 if (ret != 0) 125 if (ret != 0)
130 dev_err(gpio->da9052->dev, 126 dev_err(gpio->da9052->dev,
131 "Failed to updated gpio even reg,%d", 127 "Failed to updated gpio even reg,%d",
132 ret); 128 ret);
133 }
134 } 129 }
135} 130}
136 131
@@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
201 .direction_input = da9052_gpio_direction_input, 196 .direction_input = da9052_gpio_direction_input,
202 .direction_output = da9052_gpio_direction_output, 197 .direction_output = da9052_gpio_direction_output,
203 .to_irq = da9052_gpio_to_irq, 198 .to_irq = da9052_gpio_to_irq,
204 .can_sleep = 1; 199 .can_sleep = 1,
205 .ngpio = 16; 200 .ngpio = 16,
206 .base = -1; 201 .base = -1,
207}; 202};
208 203
209static int __devinit da9052_gpio_probe(struct platform_device *pdev) 204static int __devinit da9052_gpio_probe(struct platform_device *pdev)
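
Besides turning the struct initialisers from ';' into ',', the da9052 hunk makes the set path always rewrite the port-mode bit from "value << shift", so driving the line low now clears the bit instead of being skipped as in the old "only write when value is set" code. A small sketch of that masked read-modify-write (constants mirror the hunk's defines):

    #include <stdio.h>

    #define ODD_PORT_MODE 0x80      /* mirrors DA9052_GPIO_ODD_PORT_MODE */
    #define ODD_SHIFT     7         /* mirrors DA9052_GPIO_ODD_SHIFT */

    /* The mode bit is always rewritten from value << shift, so writing 0
     * clears it. */
    static unsigned char reg_update(unsigned char reg, unsigned char mask,
                                    unsigned char val)
    {
        return (unsigned char)((reg & ~mask) | (val & mask));
    }

    int main(void)
    {
        unsigned char reg = 0;

        reg = reg_update(reg, ODD_PORT_MODE, 1 << ODD_SHIFT);   /* drive high */
        printf("after set:   0x%02x\n", reg);
        reg = reg_update(reg, ODD_PORT_MODE, 0 << ODD_SHIFT);   /* drive low */
        printf("after clear: 0x%02x\n", reg);
        return 0;
    }
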
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index ea8e73869250..461958fc2264 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
332 &chip->reg->regs[chip->ch].imask); 332 &chip->reg->regs[chip->ch].imask);
333} 333}
334 334
335static void ioh_irq_disable(struct irq_data *d)
336{
337 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
338 struct ioh_gpio *chip = gc->private;
339 unsigned long flags;
340 u32 ien;
341
342 spin_lock_irqsave(&chip->spinlock, flags);
343 ien = ioread32(&chip->reg->regs[chip->ch].ien);
344 ien &= ~(1 << (d->irq - chip->irq_base));
345 iowrite32(ien, &chip->reg->regs[chip->ch].ien);
346 spin_unlock_irqrestore(&chip->spinlock, flags);
347}
348
349static void ioh_irq_enable(struct irq_data *d)
350{
351 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
352 struct ioh_gpio *chip = gc->private;
353 unsigned long flags;
354 u32 ien;
355
356 spin_lock_irqsave(&chip->spinlock, flags);
357 ien = ioread32(&chip->reg->regs[chip->ch].ien);
358 ien |= 1 << (d->irq - chip->irq_base);
359 iowrite32(ien, &chip->reg->regs[chip->ch].ien);
360 spin_unlock_irqrestore(&chip->spinlock, flags);
361}
362
335static irqreturn_t ioh_gpio_handler(int irq, void *dev_id) 363static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
336{ 364{
337 struct ioh_gpio *chip = dev_id; 365 struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
339 int i, j; 367 int i, j;
340 int ret = IRQ_NONE; 368 int ret = IRQ_NONE;
341 369
342 for (i = 0; i < 8; i++) { 370 for (i = 0; i < 8; i++, chip++) {
343 reg_val = ioread32(&chip->reg->regs[i].istatus); 371 reg_val = ioread32(&chip->reg->regs[i].istatus);
344 for (j = 0; j < num_ports[i]; j++) { 372 for (j = 0; j < num_ports[i]; j++) {
345 if (reg_val & BIT(j)) { 373 if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
370 ct->chip.irq_mask = ioh_irq_mask; 398 ct->chip.irq_mask = ioh_irq_mask;
371 ct->chip.irq_unmask = ioh_irq_unmask; 399 ct->chip.irq_unmask = ioh_irq_unmask;
372 ct->chip.irq_set_type = ioh_irq_type; 400 ct->chip.irq_set_type = ioh_irq_type;
401 ct->chip.irq_disable = ioh_irq_disable;
402 ct->chip.irq_enable = ioh_irq_enable;
373 403
374 irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, 404 irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
375 IRQ_NOREQUEST | IRQ_NOPROBE, 0); 405 IRQ_NOREQUEST | IRQ_NOPROBE, 0);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index ec3fcf0a7e12..5cd04b65c556 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
132 return 0; 132 return 0;
133} 133}
134 134
135static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
136{
137 /* GPIO 28..31 are input only on MPC5121 */
138 if (gpio >= 28)
139 return -EINVAL;
140
141 return mpc8xxx_gpio_dir_out(gc, gpio, val);
142}
143
135static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset) 144static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
136{ 145{
137 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); 146 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
340 mm_gc->save_regs = mpc8xxx_gpio_save_regs; 349 mm_gc->save_regs = mpc8xxx_gpio_save_regs;
341 gc->ngpio = MPC8XXX_GPIO_PINS; 350 gc->ngpio = MPC8XXX_GPIO_PINS;
342 gc->direction_input = mpc8xxx_gpio_dir_in; 351 gc->direction_input = mpc8xxx_gpio_dir_in;
343 gc->direction_output = mpc8xxx_gpio_dir_out; 352 gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
344 if (of_device_is_compatible(np, "fsl,mpc8572-gpio")) 353 mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
345 gc->get = mpc8572_gpio_get; 354 gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
346 else 355 mpc8572_gpio_get : mpc8xxx_gpio_get;
347 gc->get = mpc8xxx_gpio_get;
348 gc->set = mpc8xxx_gpio_set; 356 gc->set = mpc8xxx_gpio_set;
349 gc->to_irq = mpc8xxx_gpio_to_irq; 357 gc->to_irq = mpc8xxx_gpio_to_irq;
350 358
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0e49d87f6c60..0b0562979171 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -148,13 +148,17 @@ static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
148 return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0; 148 return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
149} 149}
150 150
151#define MOD_REG_BIT(reg, bit_mask, set) \ 151static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
152do { \ 152{
153 int l = __raw_readl(base + reg); \ 153 int l = __raw_readl(base + reg);
154 if (set) l |= bit_mask; \ 154
155 else l &= ~bit_mask; \ 155 if (set)
156 __raw_writel(l, base + reg); \ 156 l |= mask;
157} while(0) 157 else
158 l &= ~mask;
159
160 __raw_writel(l, base + reg);
161}
158 162
159/** 163/**
160 * _set_gpio_debounce - low level gpio debounce time 164 * _set_gpio_debounce - low level gpio debounce time
@@ -210,28 +214,28 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
210 u32 gpio_bit = 1 << gpio; 214 u32 gpio_bit = 1 << gpio;
211 215
212 if (cpu_is_omap44xx()) { 216 if (cpu_is_omap44xx()) {
213 MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit, 217 _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT0, gpio_bit,
214 trigger & IRQ_TYPE_LEVEL_LOW); 218 trigger & IRQ_TYPE_LEVEL_LOW);
215 MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT1, gpio_bit, 219 _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT1, gpio_bit,
216 trigger & IRQ_TYPE_LEVEL_HIGH); 220 trigger & IRQ_TYPE_LEVEL_HIGH);
217 MOD_REG_BIT(OMAP4_GPIO_RISINGDETECT, gpio_bit, 221 _gpio_rmw(base, OMAP4_GPIO_RISINGDETECT, gpio_bit,
218 trigger & IRQ_TYPE_EDGE_RISING); 222 trigger & IRQ_TYPE_EDGE_RISING);
219 MOD_REG_BIT(OMAP4_GPIO_FALLINGDETECT, gpio_bit, 223 _gpio_rmw(base, OMAP4_GPIO_FALLINGDETECT, gpio_bit,
220 trigger & IRQ_TYPE_EDGE_FALLING); 224 trigger & IRQ_TYPE_EDGE_FALLING);
221 } else { 225 } else {
222 MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit, 226 _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
223 trigger & IRQ_TYPE_LEVEL_LOW); 227 trigger & IRQ_TYPE_LEVEL_LOW);
224 MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit, 228 _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
225 trigger & IRQ_TYPE_LEVEL_HIGH); 229 trigger & IRQ_TYPE_LEVEL_HIGH);
226 MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit, 230 _gpio_rmw(base, OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
227 trigger & IRQ_TYPE_EDGE_RISING); 231 trigger & IRQ_TYPE_EDGE_RISING);
228 MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit, 232 _gpio_rmw(base, OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
229 trigger & IRQ_TYPE_EDGE_FALLING); 233 trigger & IRQ_TYPE_EDGE_FALLING);
230 } 234 }
231 if (likely(!(bank->non_wakeup_gpios & gpio_bit))) { 235 if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
232 if (cpu_is_omap44xx()) { 236 if (cpu_is_omap44xx()) {
233 MOD_REG_BIT(OMAP4_GPIO_IRQWAKEN0, gpio_bit, 237 _gpio_rmw(base, OMAP4_GPIO_IRQWAKEN0, gpio_bit,
234 trigger != 0); 238 trigger != 0);
235 } else { 239 } else {
236 /* 240 /*
237 * GPIO wakeup request can only be generated on edge 241 * GPIO wakeup request can only be generated on edge
@@ -1086,6 +1090,11 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
1086 1090
1087 gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base, 1091 gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
1088 handle_simple_irq); 1092 handle_simple_irq);
1093 if (!gc) {
1094 dev_err(bank->dev, "Memory alloc failed for gc\n");
1095 return;
1096 }
1097
1089 ct = gc->chip_types; 1098 ct = gc->chip_types;
1090 1099
1091 /* NOTE: No ack required, reading IRQ status clears it. */ 1100 /* NOTE: No ack required, reading IRQ status clears it. */
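
The OMAP change replaces the MOD_REG_BIT() macro, which silently picked up "base" from the caller's scope, with the _gpio_rmw() helper that takes the base explicitly. A userspace sketch of the same read-modify-write helper, with a plain array standing in for the memory-mapped bank and for __raw_readl()/__raw_writel():

    #include <stdio.h>
    #include <stdint.h>

    /* The helper takes the register base explicitly and does an ordinary
     * read-modify-write. */
    static inline void gpio_rmw(uint32_t *base, unsigned int reg,
                                uint32_t mask, int set)
    {
        uint32_t l = base[reg];

        if (set)
            l |= mask;
        else
            l &= ~mask;
        base[reg] = l;
    }

    int main(void)
    {
        uint32_t regs[4] = { 0 };

        gpio_rmw(regs, 2, 1u << 5, 1);      /* set bit 5 of register 2 */
        printf("reg2 after set:   0x%08x\n", (unsigned int)regs[2]);
        gpio_rmw(regs, 2, 1u << 5, 0);      /* and clear it again */
        printf("reg2 after clear: 0x%08x\n", (unsigned int)regs[2]);
        return 0;
    }
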
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 0550dcb85814..d3f3e8f54561 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -546,7 +546,7 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip)
546 * Translate OpenFirmware node properties into platform_data 546 * Translate OpenFirmware node properties into platform_data
547 * WARNING: This is DEPRECATED and will be removed eventually! 547 * WARNING: This is DEPRECATED and will be removed eventually!
548 */ 548 */
549void 549static void
550pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) 550pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
551{ 551{
552 struct device_node *node; 552 struct device_node *node;
@@ -574,7 +574,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
574 *invert = *val; 574 *invert = *val;
575} 575}
576#else 576#else
577void 577static void
578pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) 578pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
579{ 579{
580 *gpio_base = -1; 580 *gpio_base = -1;
@@ -596,9 +596,6 @@ static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert)
596 596
597 /* set platform specific polarity inversion */ 597 /* set platform specific polarity inversion */
598 ret = pca953x_write_reg(chip, PCA953X_INVERT, invert); 598 ret = pca953x_write_reg(chip, PCA953X_INVERT, invert);
599 if (ret)
600 goto out;
601 return 0;
602out: 599out:
603 return ret; 600 return ret;
604} 601}
@@ -640,7 +637,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
640 struct pca953x_platform_data *pdata; 637 struct pca953x_platform_data *pdata;
641 struct pca953x_chip *chip; 638 struct pca953x_chip *chip;
642 int irq_base=0, invert=0; 639 int irq_base=0, invert=0;
643 int ret = 0; 640 int ret;
644 641
645 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL); 642 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
646 if (chip == NULL) 643 if (chip == NULL)
@@ -673,10 +670,10 @@ static int __devinit pca953x_probe(struct i2c_client *client,
673 pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK); 670 pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
674 671
675 if (chip->chip_type == PCA953X_TYPE) 672 if (chip->chip_type == PCA953X_TYPE)
676 device_pca953x_init(chip, invert); 673 ret = device_pca953x_init(chip, invert);
677 else if (chip->chip_type == PCA957X_TYPE)
678 device_pca957x_init(chip, invert);
679 else 674 else
675 ret = device_pca957x_init(chip, invert);
676 if (ret)
680 goto out_failed; 677 goto out_failed;
681 678
682 ret = pca953x_irq_setup(chip, id, irq_base); 679 ret = pca953x_irq_setup(chip, id, irq_base);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 093c90bd3c1d..4102f63230fd 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -238,10 +238,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
238 int ret, irq, i; 238 int ret, irq, i;
239 static DECLARE_BITMAP(init_irq, NR_IRQS); 239 static DECLARE_BITMAP(init_irq, NR_IRQS);
240 240
241 pdata = dev->dev.platform_data;
242 if (pdata == NULL)
243 return -ENODEV;
244
245 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 241 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
246 if (chip == NULL) 242 if (chip == NULL)
247 return -ENOMEM; 243 return -ENOMEM;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 45079046b6d5..2418429a9836 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,7 +9,6 @@ menuconfig DRM
9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU 9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
10 select I2C 10 select I2C
11 select I2C_ALGOBIT 11 select I2C_ALGOBIT
12 select SLOW_WORK
13 help 12 help
14 Kernel-level support for the Direct Rendering Infrastructure (DRI) 13 Kernel-level support for the Direct Rendering Infrastructure (DRI)
15 introduced in XFree86 4.0. If you say Y here, you need to select 14 introduced in XFree86 4.0. If you say Y here, you need to select
@@ -96,6 +95,7 @@ config DRM_I915
96 select FB_CFB_IMAGEBLIT 95 select FB_CFB_IMAGEBLIT
97 # i915 depends on ACPI_VIDEO when ACPI is enabled 96 # i915 depends on ACPI_VIDEO when ACPI is enabled
98 # but for select to work, need to select ACPI_VIDEO's dependencies, ick 97 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
98 select BACKLIGHT_LCD_SUPPORT if ACPI
99 select BACKLIGHT_CLASS_DEVICE if ACPI 99 select BACKLIGHT_CLASS_DEVICE if ACPI
100 select VIDEO_OUTPUT_CONTROL if ACPI 100 select VIDEO_OUTPUT_CONTROL if ACPI
101 select INPUT if ACPI 101 select INPUT if ACPI
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 0d1faa72e1ff..f259a2563204 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2340,6 +2340,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
2340 } 2340 }
2341 2341
2342 if (num_clips && clips_ptr) { 2342 if (num_clips && clips_ptr) {
2343 if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
2344 ret = -EINVAL;
2345 goto out_err1;
2346 }
2343 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); 2347 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
2344 if (!clips) { 2348 if (!clips) {
2345 ret = -ENOMEM; 2349 ret = -ENOMEM;
@@ -2585,8 +2589,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
2585 property->num_values = num_values; 2589 property->num_values = num_values;
2586 INIT_LIST_HEAD(&property->enum_blob_list); 2590 INIT_LIST_HEAD(&property->enum_blob_list);
2587 2591
2588 if (name) 2592 if (name) {
2589 strncpy(property->name, name, DRM_PROP_NAME_LEN); 2593 strncpy(property->name, name, DRM_PROP_NAME_LEN);
2594 property->name[DRM_PROP_NAME_LEN-1] = '\0';
2595 }
2590 2596
2591 list_add_tail(&property->head, &dev->mode_config.property_list); 2597 list_add_tail(&property->head, &dev->mode_config.property_list);
2592 return property; 2598 return property;
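
drm_property_create() now terminates the copied name explicitly, because strncpy() leaves the destination unterminated when the source fills it completely. A short demonstration of the pitfall and the one-line fix (the buffer is made deliberately small to force truncation):

    #include <stdio.h>
    #include <string.h>

    #define PROP_NAME_LEN 8     /* deliberately small to force truncation */

    int main(void)
    {
        char name[PROP_NAME_LEN];

        /* strncpy() fills the buffer but writes no terminator when the
         * source is at least PROP_NAME_LEN bytes long ... */
        strncpy(name, "a-rather-long-property-name", PROP_NAME_LEN);
        /* ... so the copy must be followed by an explicit NUL, which is
         * exactly the line the patch adds. */
        name[PROP_NAME_LEN - 1] = '\0';
        printf("stored: \"%s\"\n", name);
        return 0;
    }
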
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index ccbdc0b5854c..42f86e71479a 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -457,6 +457,30 @@ done:
457EXPORT_SYMBOL(drm_crtc_helper_set_mode); 457EXPORT_SYMBOL(drm_crtc_helper_set_mode);
458 458
459 459
460static int
461drm_crtc_helper_disable(struct drm_crtc *crtc)
462{
463 struct drm_device *dev = crtc->dev;
464 struct drm_connector *connector;
465 struct drm_encoder *encoder;
466
467 /* Decouple all encoders and their attached connectors from this crtc */
468 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
469 if (encoder->crtc != crtc)
470 continue;
471
472 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
473 if (connector->encoder != encoder)
474 continue;
475
476 connector->encoder = NULL;
477 }
478 }
479
480 drm_helper_disable_unused_functions(dev);
481 return 0;
482}
483
460/** 484/**
461 * drm_crtc_helper_set_config - set a new config from userspace 485 * drm_crtc_helper_set_config - set a new config from userspace
462 * @crtc: CRTC to setup 486 * @crtc: CRTC to setup
@@ -485,6 +509,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
485 struct drm_connector *save_connectors, *connector; 509 struct drm_connector *save_connectors, *connector;
486 int count = 0, ro, fail = 0; 510 int count = 0, ro, fail = 0;
487 struct drm_crtc_helper_funcs *crtc_funcs; 511 struct drm_crtc_helper_funcs *crtc_funcs;
512 struct drm_mode_set save_set;
488 int ret = 0; 513 int ret = 0;
489 int i; 514 int i;
490 515
@@ -510,8 +535,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
510 (int)set->num_connectors, set->x, set->y); 535 (int)set->num_connectors, set->x, set->y);
511 } else { 536 } else {
512 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); 537 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
513 set->mode = NULL; 538 return drm_crtc_helper_disable(set->crtc);
514 set->num_connectors = 0;
515 } 539 }
516 540
517 dev = set->crtc->dev; 541 dev = set->crtc->dev;
@@ -557,6 +581,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
557 save_connectors[count++] = *connector; 581 save_connectors[count++] = *connector;
558 } 582 }
559 583
584 save_set.crtc = set->crtc;
585 save_set.mode = &set->crtc->mode;
586 save_set.x = set->crtc->x;
587 save_set.y = set->crtc->y;
588 save_set.fb = set->crtc->fb;
589
560 /* We should be able to check here if the fb has the same properties 590 /* We should be able to check here if the fb has the same properties
561 * and then just flip_or_move it */ 591 * and then just flip_or_move it */
562 if (set->crtc->fb != set->fb) { 592 if (set->crtc->fb != set->fb) {
@@ -722,6 +752,12 @@ fail:
722 *connector = save_connectors[count++]; 752 *connector = save_connectors[count++];
723 } 753 }
724 754
755 /* Try to restore the config */
756 if (mode_changed &&
757 !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
758 save_set.y, save_set.fb))
759 DRM_ERROR("failed to restore config after modeset failure\n");
760
725 kfree(save_connectors); 761 kfree(save_connectors);
726 kfree(save_encoders); 762 kfree(save_encoders);
727 kfree(save_crtcs); 763 kfree(save_crtcs);
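
The set_config error path now captures the current CRTC mode, offsets and framebuffer in save_set before touching anything, and re-applies it if the new modeset fails. A toy sketch of that save-and-restore-on-failure pattern; apply() is an illustrative stand-in, not the helper's real signature:

    #include <stdio.h>

    struct mode_set { int x, y; };

    /* Toy stand-in for applying a mode: negative values count as an
     * invalid configuration. */
    static int apply(struct mode_set *cur, const struct mode_set *next)
    {
        if (next->x < 0 || next->y < 0)
            return -1;
        *cur = *next;
        return 0;
    }

    int main(void)
    {
        struct mode_set crtc = { 1024, 768 };
        struct mode_set save_set = crtc;    /* captured before any change */
        struct mode_set requested = { -1, -1 };

        if (apply(&crtc, &requested)) {
            /* the new config failed: put the old one back */
            if (apply(&crtc, &save_set))
                fprintf(stderr, "failed to restore config after modeset failure\n");
        }
        printf("crtc still at %dx%d\n", crtc.x, crtc.y);
        return 0;
    }
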
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index d067c12ba940..1c7a1c0d3edd 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -118,7 +118,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
118 tmp->minor = minor; 118 tmp->minor = minor;
119 tmp->dent = ent; 119 tmp->dent = ent;
120 tmp->info_ent = &files[i]; 120 tmp->info_ent = &files[i];
121 list_add(&(tmp->list), &(minor->debugfs_nodes.list)); 121
122 mutex_lock(&minor->debugfs_lock);
123 list_add(&tmp->list, &minor->debugfs_list);
124 mutex_unlock(&minor->debugfs_lock);
122 } 125 }
123 return 0; 126 return 0;
124 127
@@ -146,7 +149,8 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
146 char name[64]; 149 char name[64];
147 int ret; 150 int ret;
148 151
149 INIT_LIST_HEAD(&minor->debugfs_nodes.list); 152 INIT_LIST_HEAD(&minor->debugfs_list);
153 mutex_init(&minor->debugfs_lock);
150 sprintf(name, "%d", minor_id); 154 sprintf(name, "%d", minor_id);
151 minor->debugfs_root = debugfs_create_dir(name, root); 155 minor->debugfs_root = debugfs_create_dir(name, root);
152 if (!minor->debugfs_root) { 156 if (!minor->debugfs_root) {
@@ -192,8 +196,9 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
192 struct drm_info_node *tmp; 196 struct drm_info_node *tmp;
193 int i; 197 int i;
194 198
199 mutex_lock(&minor->debugfs_lock);
195 for (i = 0; i < count; i++) { 200 for (i = 0; i < count; i++) {
196 list_for_each_safe(pos, q, &minor->debugfs_nodes.list) { 201 list_for_each_safe(pos, q, &minor->debugfs_list) {
197 tmp = list_entry(pos, struct drm_info_node, list); 202 tmp = list_entry(pos, struct drm_info_node, list);
198 if (tmp->info_ent == &files[i]) { 203 if (tmp->info_ent == &files[i]) {
199 debugfs_remove(tmp->dent); 204 debugfs_remove(tmp->dent);
@@ -202,6 +207,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
202 } 207 }
203 } 208 }
204 } 209 }
210 mutex_unlock(&minor->debugfs_lock);
205 return 0; 211 return 0;
206} 212}
207EXPORT_SYMBOL(drm_debugfs_remove_files); 213EXPORT_SYMBOL(drm_debugfs_remove_files);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index eaf25ffd9a46..bc5febe45762 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -125,7 +125,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
125 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 125 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
126 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 126 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
127 127
128 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), 128 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
129 129
130 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), 130 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
131 131
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index cb3794a00f98..44a5d0ad8b7c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -110,10 +110,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
110 /* Prevent vblank irq processing while disabling vblank irqs, 110 /* Prevent vblank irq processing while disabling vblank irqs,
111 * so no updates of timestamps or count can happen after we've 111 * so no updates of timestamps or count can happen after we've
112 * disabled. Needed to prevent races in case of delayed irq's. 112 * disabled. Needed to prevent races in case of delayed irq's.
113 * Disable preemption, so vblank_time_lock is held as short as
114 * possible, even under a kernel with PREEMPT_RT patches.
115 */ 113 */
116 preempt_disable();
117 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 114 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
118 115
119 dev->driver->disable_vblank(dev, crtc); 116 dev->driver->disable_vblank(dev, crtc);
@@ -164,7 +161,6 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
164 clear_vblank_timestamps(dev, crtc); 161 clear_vblank_timestamps(dev, crtc);
165 162
166 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 163 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
167 preempt_enable();
168} 164}
169 165
170static void vblank_disable_fn(unsigned long arg) 166static void vblank_disable_fn(unsigned long arg)
@@ -407,13 +403,16 @@ int drm_irq_uninstall(struct drm_device *dev)
407 /* 403 /*
408 * Wake up any waiters so they don't hang. 404 * Wake up any waiters so they don't hang.
409 */ 405 */
410 spin_lock_irqsave(&dev->vbl_lock, irqflags); 406 if (dev->num_crtcs) {
411 for (i = 0; i < dev->num_crtcs; i++) { 407 spin_lock_irqsave(&dev->vbl_lock, irqflags);
412 DRM_WAKEUP(&dev->vbl_queue[i]); 408 for (i = 0; i < dev->num_crtcs; i++) {
413 dev->vblank_enabled[i] = 0; 409 DRM_WAKEUP(&dev->vbl_queue[i]);
414 dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i); 410 dev->vblank_enabled[i] = 0;
411 dev->last_vblank[i] =
412 dev->driver->get_vblank_counter(dev, i);
413 }
414 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
415 } 415 }
416 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
417 416
418 if (!irq_enabled) 417 if (!irq_enabled)
419 return -EINVAL; 418 return -EINVAL;
@@ -886,10 +885,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
886 spin_lock_irqsave(&dev->vbl_lock, irqflags); 885 spin_lock_irqsave(&dev->vbl_lock, irqflags);
887 /* Going from 0->1 means we have to enable interrupts again */ 886 /* Going from 0->1 means we have to enable interrupts again */
888 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { 887 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
889 /* Disable preemption while holding vblank_time_lock. Do
890 * it explicitely to guard against PREEMPT_RT kernel.
891 */
892 preempt_disable();
893 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); 888 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
894 if (!dev->vblank_enabled[crtc]) { 889 if (!dev->vblank_enabled[crtc]) {
895 /* Enable vblank irqs under vblank_time_lock protection. 890 /* Enable vblank irqs under vblank_time_lock protection.
@@ -909,7 +904,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
909 } 904 }
910 } 905 }
911 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); 906 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
912 preempt_enable();
913 } else { 907 } else {
914 if (!dev->vblank_enabled[crtc]) { 908 if (!dev->vblank_enabled[crtc]) {
915 atomic_dec(&dev->vblank_refcount[crtc]); 909 atomic_dec(&dev->vblank_refcount[crtc]);
@@ -1125,6 +1119,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
1125 trace_drm_vblank_event_delivered(current->pid, pipe, 1119 trace_drm_vblank_event_delivered(current->pid, pipe,
1126 vblwait->request.sequence); 1120 vblwait->request.sequence);
1127 } else { 1121 } else {
1122 /* drm_handle_vblank_events will call drm_vblank_put */
1128 list_add_tail(&e->base.link, &dev->vblank_event_list); 1123 list_add_tail(&e->base.link, &dev->vblank_event_list);
1129 vblwait->reply.sequence = vblwait->request.sequence; 1124 vblwait->reply.sequence = vblwait->request.sequence;
1130 } 1125 }
@@ -1205,8 +1200,12 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1205 goto done; 1200 goto done;
1206 } 1201 }
1207 1202
1208 if (flags & _DRM_VBLANK_EVENT) 1203 if (flags & _DRM_VBLANK_EVENT) {
1204 /* must hold on to the vblank ref until the event fires
1205 * drm_vblank_put will be called asynchronously
1206 */
1209 return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); 1207 return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
1208 }
1210 1209
1211 if ((flags & _DRM_VBLANK_NEXTONMISS) && 1210 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
1212 (seq - vblwait->request.sequence) <= (1<<23)) { 1211 (seq - vblwait->request.sequence) <= (1<<23)) {
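
The new comments above state the ownership rule for queued vblank events: the reference taken in drm_wait_vblank() is not dropped on the queueing path, it is released later by the code that delivers the event. Below is a tiny sketch of that rule, with a plain counter standing in for the DRM vblank refcount; all names are hypothetical.

#include <assert.h>

static int vblank_refcount;

static void vblank_get(void) { vblank_refcount++; }
static void vblank_put(void) { assert(vblank_refcount > 0); vblank_refcount--; }

/* queueing path: keep the reference taken by the caller */
static void queue_vblank_event(void)
{
	/* no vblank_put() here on purpose */
}

/* delivery path (runs later, e.g. from the vblank interrupt) */
static void handle_vblank_event(void)
{
	/* ... deliver the event to userspace ... */
	vblank_put();			/* the deferred drop */
}

int main(void)
{
	vblank_get();			/* taken when the wait is requested */
	queue_vblank_event();		/* reference intentionally survives */
	handle_vblank_event();		/* reference finally released */
	assert(vblank_refcount == 0);
	return 0;
}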
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 6f8afea94fc9..2bb07bca511a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -27,82 +27,84 @@
27#include "drm.h" 27#include "drm.h"
28 28
29#include "exynos_drm_drv.h" 29#include "exynos_drm_drv.h"
30#include "exynos_drm_gem.h"
30#include "exynos_drm_buf.h" 31#include "exynos_drm_buf.h"
31 32
32static DEFINE_MUTEX(exynos_drm_buf_lock);
33
34static int lowlevel_buffer_allocate(struct drm_device *dev, 33static int lowlevel_buffer_allocate(struct drm_device *dev,
35 struct exynos_drm_buf_entry *entry) 34 struct exynos_drm_gem_buf *buffer)
36{ 35{
37 DRM_DEBUG_KMS("%s\n", __FILE__); 36 DRM_DEBUG_KMS("%s\n", __FILE__);
38 37
39 entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size, 38 buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
40 (dma_addr_t *)&entry->paddr, GFP_KERNEL); 39 &buffer->dma_addr, GFP_KERNEL);
41 if (!entry->paddr) { 40 if (!buffer->kvaddr) {
42 DRM_ERROR("failed to allocate buffer.\n"); 41 DRM_ERROR("failed to allocate buffer.\n");
43 return -ENOMEM; 42 return -ENOMEM;
44 } 43 }
45 44
46 DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n", 45 DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
47 (unsigned int)entry->vaddr, entry->paddr, entry->size); 46 (unsigned long)buffer->kvaddr,
47 (unsigned long)buffer->dma_addr,
48 buffer->size);
48 49
49 return 0; 50 return 0;
50} 51}
51 52
52static void lowlevel_buffer_deallocate(struct drm_device *dev, 53static void lowlevel_buffer_deallocate(struct drm_device *dev,
53 struct exynos_drm_buf_entry *entry) 54 struct exynos_drm_gem_buf *buffer)
54{ 55{
55 DRM_DEBUG_KMS("%s.\n", __FILE__); 56 DRM_DEBUG_KMS("%s.\n", __FILE__);
56 57
57 if (entry->paddr && entry->vaddr && entry->size) 58 if (buffer->dma_addr && buffer->size)
58 dma_free_writecombine(dev->dev, entry->size, entry->vaddr, 59 dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
59 entry->paddr); 60 (dma_addr_t)buffer->dma_addr);
60 else 61 else
61 DRM_DEBUG_KMS("entry data is null.\n"); 62 DRM_DEBUG_KMS("buffer data are invalid.\n");
62} 63}
63 64
64struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev, 65struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
65 unsigned int size) 66 unsigned int size)
66{ 67{
67 struct exynos_drm_buf_entry *entry; 68 struct exynos_drm_gem_buf *buffer;
68 69
69 DRM_DEBUG_KMS("%s.\n", __FILE__); 70 DRM_DEBUG_KMS("%s.\n", __FILE__);
71 DRM_DEBUG_KMS("desired size = 0x%x\n", size);
70 72
71 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 73 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
72 if (!entry) { 74 if (!buffer) {
73 DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n"); 75 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
74 return ERR_PTR(-ENOMEM); 76 return ERR_PTR(-ENOMEM);
75 } 77 }
76 78
77 entry->size = size; 79 buffer->size = size;
78 80
79 /* 81 /*
80 * allocate memory region with size and set the memory information 82 * allocate memory region with size and set the memory information
81 * to vaddr and paddr of a entry object. 83 * to vaddr and dma_addr of a buffer object.
82 */ 84 */
83 if (lowlevel_buffer_allocate(dev, entry) < 0) { 85 if (lowlevel_buffer_allocate(dev, buffer) < 0) {
84 kfree(entry); 86 kfree(buffer);
85 entry = NULL; 87 buffer = NULL;
86 return ERR_PTR(-ENOMEM); 88 return ERR_PTR(-ENOMEM);
87 } 89 }
88 90
89 return entry; 91 return buffer;
90} 92}
91 93
92void exynos_drm_buf_destroy(struct drm_device *dev, 94void exynos_drm_buf_destroy(struct drm_device *dev,
93 struct exynos_drm_buf_entry *entry) 95 struct exynos_drm_gem_buf *buffer)
94{ 96{
95 DRM_DEBUG_KMS("%s.\n", __FILE__); 97 DRM_DEBUG_KMS("%s.\n", __FILE__);
96 98
97 if (!entry) { 99 if (!buffer) {
98 DRM_DEBUG_KMS("entry is null.\n"); 100 DRM_DEBUG_KMS("buffer is null.\n");
99 return; 101 return;
100 } 102 }
101 103
102 lowlevel_buffer_deallocate(dev, entry); 104 lowlevel_buffer_deallocate(dev, buffer);
103 105
104 kfree(entry); 106 kfree(buffer);
105 entry = NULL; 107 buffer = NULL;
106} 108}
107 109
108MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); 110MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
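
To show the shape of the renamed buffer object without any kernel dependencies, here is a compilable sketch of its create/destroy lifecycle. malloc() stands in for dma_alloc_writecombine(), the uintptr_t bus address is faked, and struct gem_buf only loosely mirrors the new exynos_drm_gem_buf fields (kvaddr, dma_addr, size).

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct gem_buf {
	void     *kvaddr;	/* CPU-visible mapping */
	uintptr_t dma_addr;	/* device-visible address (faked here) */
	size_t    size;
};

static struct gem_buf *buf_create(size_t size)
{
	struct gem_buf *buf = calloc(1, sizeof(*buf));

	if (!buf)
		return NULL;
	buf->size = size;
	buf->kvaddr = malloc(size);		/* stand-in allocation */
	if (!buf->kvaddr) {
		free(buf);
		return NULL;
	}
	buf->dma_addr = (uintptr_t)buf->kvaddr;	/* pretend bus address */
	return buf;
}

static void buf_destroy(struct gem_buf *buf)
{
	if (!buf)
		return;
	if (buf->dma_addr && buf->size)		/* free only a recorded allocation */
		free(buf->kvaddr);
	free(buf);
}

int main(void)
{
	struct gem_buf *buf = buf_create(4096);

	if (buf)
		printf("kvaddr=%p dma_addr=0x%lx size=%zu\n",
		       buf->kvaddr, (unsigned long)buf->dma_addr, buf->size);
	buf_destroy(buf);
	return 0;
}

The guard in buf_destroy() loosely mirrors the dma_addr/size check in lowlevel_buffer_deallocate() above: nothing is freed unless a real allocation was recorded.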
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 045d59eab01a..6e91f9caa5db 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -26,28 +26,15 @@
26#ifndef _EXYNOS_DRM_BUF_H_ 26#ifndef _EXYNOS_DRM_BUF_H_
27#define _EXYNOS_DRM_BUF_H_ 27#define _EXYNOS_DRM_BUF_H_
28 28
29/*
30 * exynos drm buffer entry structure.
31 *
32 * @paddr: physical address of allocated memory.
33 * @vaddr: kernel virtual address of allocated memory.
34 * @size: size of allocated memory.
35 */
36struct exynos_drm_buf_entry {
37 dma_addr_t paddr;
38 void __iomem *vaddr;
39 unsigned int size;
40};
41
42/* allocate physical memory. */ 29/* allocate physical memory. */
43struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev, 30struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
44 unsigned int size); 31 unsigned int size);
45 32
46/* get physical memory information of a drm framebuffer. */ 33/* get memory information of a drm framebuffer. */
47struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb); 34struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
48 35
49/* remove allocated physical memory. */ 36/* remove allocated physical memory. */
50void exynos_drm_buf_destroy(struct drm_device *dev, 37void exynos_drm_buf_destroy(struct drm_device *dev,
51 struct exynos_drm_buf_entry *entry); 38 struct exynos_drm_gem_buf *buffer);
52 39
53#endif 40#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 985d9e768728..d620b0784257 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -37,6 +37,8 @@
37 37
38struct exynos_drm_connector { 38struct exynos_drm_connector {
39 struct drm_connector drm_connector; 39 struct drm_connector drm_connector;
40 uint32_t encoder_id;
41 struct exynos_drm_manager *manager;
40}; 42};
41 43
42/* convert exynos_video_timings to drm_display_mode */ 44/* convert exynos_video_timings to drm_display_mode */
@@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode,
47 DRM_DEBUG_KMS("%s\n", __FILE__); 49 DRM_DEBUG_KMS("%s\n", __FILE__);
48 50
49 mode->clock = timing->pixclock / 1000; 51 mode->clock = timing->pixclock / 1000;
52 mode->vrefresh = timing->refresh;
50 53
51 mode->hdisplay = timing->xres; 54 mode->hdisplay = timing->xres;
52 mode->hsync_start = mode->hdisplay + timing->left_margin; 55 mode->hsync_start = mode->hdisplay + timing->left_margin;
@@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode,
57 mode->vsync_start = mode->vdisplay + timing->upper_margin; 60 mode->vsync_start = mode->vdisplay + timing->upper_margin;
58 mode->vsync_end = mode->vsync_start + timing->vsync_len; 61 mode->vsync_end = mode->vsync_start + timing->vsync_len;
59 mode->vtotal = mode->vsync_end + timing->lower_margin; 62 mode->vtotal = mode->vsync_end + timing->lower_margin;
63
64 if (timing->vmode & FB_VMODE_INTERLACED)
65 mode->flags |= DRM_MODE_FLAG_INTERLACE;
66
67 if (timing->vmode & FB_VMODE_DOUBLE)
68 mode->flags |= DRM_MODE_FLAG_DBLSCAN;
60} 69}
61 70
62/* convert drm_display_mode to exynos_video_timings */ 71/* convert drm_display_mode to exynos_video_timings */
@@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing,
69 memset(timing, 0, sizeof(*timing)); 78 memset(timing, 0, sizeof(*timing));
70 79
71 timing->pixclock = mode->clock * 1000; 80 timing->pixclock = mode->clock * 1000;
72 timing->refresh = mode->vrefresh; 81 timing->refresh = drm_mode_vrefresh(mode);
73 82
74 timing->xres = mode->hdisplay; 83 timing->xres = mode->hdisplay;
75 timing->left_margin = mode->hsync_start - mode->hdisplay; 84 timing->left_margin = mode->hsync_start - mode->hdisplay;
@@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing,
92 101
93static int exynos_drm_connector_get_modes(struct drm_connector *connector) 102static int exynos_drm_connector_get_modes(struct drm_connector *connector)
94{ 103{
95 struct exynos_drm_manager *manager = 104 struct exynos_drm_connector *exynos_connector =
96 exynos_drm_get_manager(connector->encoder); 105 to_exynos_connector(connector);
97 struct exynos_drm_display *display = manager->display; 106 struct exynos_drm_manager *manager = exynos_connector->manager;
107 struct exynos_drm_display_ops *display_ops = manager->display_ops;
98 unsigned int count; 108 unsigned int count;
99 109
100 DRM_DEBUG_KMS("%s\n", __FILE__); 110 DRM_DEBUG_KMS("%s\n", __FILE__);
101 111
102 if (!display) { 112 if (!display_ops) {
103 DRM_DEBUG_KMS("display is null.\n"); 113 DRM_DEBUG_KMS("display_ops is null.\n");
104 return 0; 114 return 0;
105 } 115 }
106 116
@@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
112 * P.S. in case of lcd panel, count is always 1 if success 122 * P.S. in case of lcd panel, count is always 1 if success
113 * because lcd panel has only one mode. 123 * because lcd panel has only one mode.
114 */ 124 */
115 if (display->get_edid) { 125 if (display_ops->get_edid) {
116 int ret; 126 int ret;
117 void *edid; 127 void *edid;
118 128
@@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
122 return 0; 132 return 0;
123 } 133 }
124 134
125 ret = display->get_edid(manager->dev, connector, 135 ret = display_ops->get_edid(manager->dev, connector,
126 edid, MAX_EDID); 136 edid, MAX_EDID);
127 if (ret < 0) { 137 if (ret < 0) {
128 DRM_ERROR("failed to get edid data.\n"); 138 DRM_ERROR("failed to get edid data.\n");
@@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
140 struct drm_display_mode *mode = drm_mode_create(connector->dev); 150 struct drm_display_mode *mode = drm_mode_create(connector->dev);
141 struct fb_videomode *timing; 151 struct fb_videomode *timing;
142 152
143 if (display->get_timing) 153 if (display_ops->get_timing)
144 timing = display->get_timing(manager->dev); 154 timing = display_ops->get_timing(manager->dev);
145 else { 155 else {
146 drm_mode_destroy(connector->dev, mode); 156 drm_mode_destroy(connector->dev, mode);
147 return 0; 157 return 0;
@@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
162static int exynos_drm_connector_mode_valid(struct drm_connector *connector, 172static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
163 struct drm_display_mode *mode) 173 struct drm_display_mode *mode)
164{ 174{
165 struct exynos_drm_manager *manager = 175 struct exynos_drm_connector *exynos_connector =
166 exynos_drm_get_manager(connector->encoder); 176 to_exynos_connector(connector);
167 struct exynos_drm_display *display = manager->display; 177 struct exynos_drm_manager *manager = exynos_connector->manager;
178 struct exynos_drm_display_ops *display_ops = manager->display_ops;
168 struct fb_videomode timing; 179 struct fb_videomode timing;
169 int ret = MODE_BAD; 180 int ret = MODE_BAD;
170 181
@@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
172 183
173 convert_to_video_timing(&timing, mode); 184 convert_to_video_timing(&timing, mode);
174 185
175 if (display && display->check_timing) 186 if (display_ops && display_ops->check_timing)
176 if (!display->check_timing(manager->dev, (void *)&timing)) 187 if (!display_ops->check_timing(manager->dev, (void *)&timing))
177 ret = MODE_OK; 188 ret = MODE_OK;
178 189
179 return ret; 190 return ret;
@@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
181 192
182struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector) 193struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
183{ 194{
195 struct drm_device *dev = connector->dev;
196 struct exynos_drm_connector *exynos_connector =
197 to_exynos_connector(connector);
198 struct drm_mode_object *obj;
199 struct drm_encoder *encoder;
200
184 DRM_DEBUG_KMS("%s\n", __FILE__); 201 DRM_DEBUG_KMS("%s\n", __FILE__);
185 202
186 return connector->encoder; 203 obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
204 DRM_MODE_OBJECT_ENCODER);
205 if (!obj) {
206 DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
207 exynos_connector->encoder_id);
208 return NULL;
209 }
210
211 encoder = obj_to_encoder(obj);
212
213 return encoder;
187} 214}
188 215
189static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { 216static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
196static enum drm_connector_status 223static enum drm_connector_status
197exynos_drm_connector_detect(struct drm_connector *connector, bool force) 224exynos_drm_connector_detect(struct drm_connector *connector, bool force)
198{ 225{
199 struct exynos_drm_manager *manager = 226 struct exynos_drm_connector *exynos_connector =
200 exynos_drm_get_manager(connector->encoder); 227 to_exynos_connector(connector);
201 struct exynos_drm_display *display = manager->display; 228 struct exynos_drm_manager *manager = exynos_connector->manager;
229 struct exynos_drm_display_ops *display_ops =
230 manager->display_ops;
202 enum drm_connector_status status = connector_status_disconnected; 231 enum drm_connector_status status = connector_status_disconnected;
203 232
204 DRM_DEBUG_KMS("%s\n", __FILE__); 233 DRM_DEBUG_KMS("%s\n", __FILE__);
205 234
206 if (display && display->is_connected) { 235 if (display_ops && display_ops->is_connected) {
207 if (display->is_connected(manager->dev)) 236 if (display_ops->is_connected(manager->dev))
208 status = connector_status_connected; 237 status = connector_status_connected;
209 else 238 else
210 status = connector_status_disconnected; 239 status = connector_status_disconnected;
@@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
251 280
252 connector = &exynos_connector->drm_connector; 281 connector = &exynos_connector->drm_connector;
253 282
254 switch (manager->display->type) { 283 switch (manager->display_ops->type) {
255 case EXYNOS_DISPLAY_TYPE_HDMI: 284 case EXYNOS_DISPLAY_TYPE_HDMI:
256 type = DRM_MODE_CONNECTOR_HDMIA; 285 type = DRM_MODE_CONNECTOR_HDMIA;
286 connector->interlace_allowed = true;
287 connector->polled = DRM_CONNECTOR_POLL_HPD;
257 break; 288 break;
258 default: 289 default:
259 type = DRM_MODE_CONNECTOR_Unknown; 290 type = DRM_MODE_CONNECTOR_Unknown;
@@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
267 if (err) 298 if (err)
268 goto err_connector; 299 goto err_connector;
269 300
301 exynos_connector->encoder_id = encoder->base.id;
302 exynos_connector->manager = manager;
270 connector->encoder = encoder; 303 connector->encoder = encoder;
304
271 err = drm_mode_connector_attach_encoder(connector, encoder); 305 err = drm_mode_connector_attach_encoder(connector, encoder);
272 if (err) { 306 if (err) {
273 DRM_ERROR("failed to attach a connector to a encoder\n"); 307 DRM_ERROR("failed to attach a connector to a encoder\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 73893e5068a4..7777d41d1cda 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -29,36 +29,17 @@
29#include "drmP.h" 29#include "drmP.h"
30#include "drm_crtc_helper.h" 30#include "drm_crtc_helper.h"
31 31
32#include "exynos_drm_crtc.h"
32#include "exynos_drm_drv.h" 33#include "exynos_drm_drv.h"
33#include "exynos_drm_fb.h" 34#include "exynos_drm_fb.h"
34#include "exynos_drm_encoder.h" 35#include "exynos_drm_encoder.h"
36#include "exynos_drm_gem.h"
35#include "exynos_drm_buf.h" 37#include "exynos_drm_buf.h"
36 38
37#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\ 39#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
38 drm_crtc) 40 drm_crtc)
39 41
40/* 42/*
41 * Exynos specific crtc postion structure.
42 *
43 * @fb_x: offset x on a framebuffer to be displyed
44 * - the unit is screen coordinates.
45 * @fb_y: offset y on a framebuffer to be displayed
46 * - the unit is screen coordinates.
47 * @crtc_x: offset x on hardware screen.
48 * @crtc_y: offset y on hardware screen.
49 * @crtc_w: width of hardware screen.
50 * @crtc_h: height of hardware screen.
51 */
52struct exynos_drm_crtc_pos {
53 unsigned int fb_x;
54 unsigned int fb_y;
55 unsigned int crtc_x;
56 unsigned int crtc_y;
57 unsigned int crtc_w;
58 unsigned int crtc_h;
59};
60
61/*
62 * Exynos specific crtc structure. 43 * Exynos specific crtc structure.
63 * 44 *
64 * @drm_crtc: crtc object. 45 * @drm_crtc: crtc object.
@@ -85,30 +66,31 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
85 66
86 exynos_drm_fn_encoder(crtc, overlay, 67 exynos_drm_fn_encoder(crtc, overlay,
87 exynos_drm_encoder_crtc_mode_set); 68 exynos_drm_encoder_crtc_mode_set);
88 exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit); 69 exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
70 exynos_drm_encoder_crtc_commit);
89} 71}
90 72
91static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, 73int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
92 struct drm_framebuffer *fb, 74 struct drm_framebuffer *fb,
93 struct drm_display_mode *mode, 75 struct drm_display_mode *mode,
94 struct exynos_drm_crtc_pos *pos) 76 struct exynos_drm_crtc_pos *pos)
95{ 77{
96 struct exynos_drm_buf_entry *entry; 78 struct exynos_drm_gem_buf *buffer;
97 unsigned int actual_w; 79 unsigned int actual_w;
98 unsigned int actual_h; 80 unsigned int actual_h;
99 81
100 entry = exynos_drm_fb_get_buf(fb); 82 buffer = exynos_drm_fb_get_buf(fb);
101 if (!entry) { 83 if (!buffer) {
102 DRM_LOG_KMS("entry is null.\n"); 84 DRM_LOG_KMS("buffer is null.\n");
103 return -EFAULT; 85 return -EFAULT;
104 } 86 }
105 87
106 overlay->paddr = entry->paddr; 88 overlay->dma_addr = buffer->dma_addr;
107 overlay->vaddr = entry->vaddr; 89 overlay->vaddr = buffer->kvaddr;
108 90
109 DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", 91 DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
110 (unsigned long)overlay->vaddr, 92 (unsigned long)overlay->vaddr,
111 (unsigned long)overlay->paddr); 93 (unsigned long)overlay->dma_addr);
112 94
113 actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w); 95 actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
114 actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h); 96 actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -171,9 +153,26 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
171 153
172static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) 154static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
173{ 155{
174 DRM_DEBUG_KMS("%s\n", __FILE__); 156 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
175 157
176 /* TODO */ 158 DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
159
160 switch (mode) {
161 case DRM_MODE_DPMS_ON:
162 exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
163 exynos_drm_encoder_crtc_commit);
164 break;
165 case DRM_MODE_DPMS_STANDBY:
166 case DRM_MODE_DPMS_SUSPEND:
167 case DRM_MODE_DPMS_OFF:
168 /* TODO */
169 exynos_drm_fn_encoder(crtc, NULL,
170 exynos_drm_encoder_crtc_disable);
171 break;
172 default:
173 DRM_DEBUG_KMS("unspecified mode %d\n", mode);
174 break;
175 }
177} 176}
178 177
179static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) 178static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -185,9 +184,12 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
185 184
186static void exynos_drm_crtc_commit(struct drm_crtc *crtc) 185static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
187{ 186{
187 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
188
188 DRM_DEBUG_KMS("%s\n", __FILE__); 189 DRM_DEBUG_KMS("%s\n", __FILE__);
189 190
190 /* drm framework doesn't check NULL. */ 191 exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
192 exynos_drm_encoder_crtc_commit);
191} 193}
192 194
193static bool 195static bool
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index c584042d6d2c..25f72a62cb88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
35int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); 35int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
36void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); 36void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
37 37
38/*
 39 * Exynos specific crtc position structure.
 40 *
 41 * @fb_x: offset x on a framebuffer to be displayed
42 * - the unit is screen coordinates.
43 * @fb_y: offset y on a framebuffer to be displayed
44 * - the unit is screen coordinates.
45 * @crtc_x: offset x on hardware screen.
46 * @crtc_y: offset y on hardware screen.
47 * @crtc_w: width of hardware screen.
48 * @crtc_h: height of hardware screen.
49 */
50struct exynos_drm_crtc_pos {
51 unsigned int fb_x;
52 unsigned int fb_y;
53 unsigned int crtc_x;
54 unsigned int crtc_y;
55 unsigned int crtc_w;
56 unsigned int crtc_h;
57};
58
59int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
60 struct drm_framebuffer *fb,
61 struct drm_display_mode *mode,
62 struct exynos_drm_crtc_pos *pos);
38#endif 63#endif
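
Since exynos_drm_crtc_pos and exynos_drm_overlay_update() are now exported from this header, a short worked example of the clipping arithmetic used by the overlay update may help: the visible size is the minimum of the area left after the CRTC offset and the requested size. The mode geometry and numbers below are made up for the example.

#include <stdio.h>

struct crtc_pos {
	unsigned int fb_x, fb_y;	/* source offset in the framebuffer */
	unsigned int crtc_x, crtc_y;	/* destination offset on the screen */
	unsigned int crtc_w, crtc_h;	/* requested size on the screen */
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int hdisplay = 1280, vdisplay = 720;	/* current mode */
	struct crtc_pos pos = { .crtc_x = 1000, .crtc_y = 600,
				.crtc_w = 400, .crtc_h = 200 };

	/* clip the overlay so it never extends past the visible area,
	 * mirroring the actual_w/actual_h computation in the crtc code */
	unsigned int actual_w = min_u(hdisplay - pos.crtc_x, pos.crtc_w);
	unsigned int actual_h = min_u(vdisplay - pos.crtc_y, pos.crtc_h);

	printf("visible overlay: %ux%u\n", actual_w, actual_h);	/* 280x120 */
	return 0;
}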
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 83810cbe3c17..53e2216de61d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -27,6 +27,7 @@
27 27
28#include "drmP.h" 28#include "drmP.h"
29#include "drm.h" 29#include "drm.h"
30#include "drm_crtc_helper.h"
30 31
31#include <drm/exynos_drm.h> 32#include <drm/exynos_drm.h>
32 33
@@ -61,6 +62,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
61 62
62 drm_mode_config_init(dev); 63 drm_mode_config_init(dev);
63 64
65 /* init kms poll for handling hpd */
66 drm_kms_helper_poll_init(dev);
67
64 exynos_drm_mode_config_init(dev); 68 exynos_drm_mode_config_init(dev);
65 69
66 /* 70 /*
@@ -116,6 +120,7 @@ static int exynos_drm_unload(struct drm_device *dev)
116 exynos_drm_fbdev_fini(dev); 120 exynos_drm_fbdev_fini(dev);
117 exynos_drm_device_unregister(dev); 121 exynos_drm_device_unregister(dev);
118 drm_vblank_cleanup(dev); 122 drm_vblank_cleanup(dev);
123 drm_kms_helper_poll_fini(dev);
119 drm_mode_config_cleanup(dev); 124 drm_mode_config_cleanup(dev);
120 kfree(dev->dev_private); 125 kfree(dev->dev_private);
121 126
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c03683f2ae72..5e02e6ecc2e0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -29,6 +29,7 @@
29#ifndef _EXYNOS_DRM_DRV_H_ 29#ifndef _EXYNOS_DRM_DRV_H_
30#define _EXYNOS_DRM_DRV_H_ 30#define _EXYNOS_DRM_DRV_H_
31 31
32#include <linux/module.h>
32#include "drm.h" 33#include "drm.h"
33 34
34#define MAX_CRTC 2 35#define MAX_CRTC 2
@@ -79,8 +80,8 @@ struct exynos_drm_overlay_ops {
79 * @scan_flag: interlace or progressive way. 80 * @scan_flag: interlace or progressive way.
80 * (it could be DRM_MODE_FLAG_*) 81 * (it could be DRM_MODE_FLAG_*)
81 * @bpp: pixel size.(in bit) 82 * @bpp: pixel size.(in bit)
82 * @paddr: bus(accessed by dma) physical memory address to this overlay 83 * @dma_addr: bus(accessed by dma) address to the memory region allocated
83 * and this is physically continuous. 84 * for a overlay.
84 * @vaddr: virtual memory addresss to this overlay. 85 * @vaddr: virtual memory addresss to this overlay.
85 * @default_win: a window to be enabled. 86 * @default_win: a window to be enabled.
86 * @color_key: color key on or off. 87 * @color_key: color key on or off.
@@ -108,7 +109,7 @@ struct exynos_drm_overlay {
108 unsigned int scan_flag; 109 unsigned int scan_flag;
109 unsigned int bpp; 110 unsigned int bpp;
110 unsigned int pitch; 111 unsigned int pitch;
111 dma_addr_t paddr; 112 dma_addr_t dma_addr;
112 void __iomem *vaddr; 113 void __iomem *vaddr;
113 114
114 bool default_win; 115 bool default_win;
@@ -130,7 +131,7 @@ struct exynos_drm_overlay {
130 * @check_timing: check if timing is valid or not. 131 * @check_timing: check if timing is valid or not.
131 * @power_on: display device on or off. 132 * @power_on: display device on or off.
132 */ 133 */
133struct exynos_drm_display { 134struct exynos_drm_display_ops {
134 enum exynos_drm_output_type type; 135 enum exynos_drm_output_type type;
135 bool (*is_connected)(struct device *dev); 136 bool (*is_connected)(struct device *dev);
136 int (*get_edid)(struct device *dev, struct drm_connector *connector, 137 int (*get_edid)(struct device *dev, struct drm_connector *connector,
@@ -146,12 +147,14 @@ struct exynos_drm_display {
146 * @mode_set: convert drm_display_mode to hw specific display mode and 147 * @mode_set: convert drm_display_mode to hw specific display mode and
147 * would be called by encoder->mode_set(). 148 * would be called by encoder->mode_set().
148 * @commit: set current hw specific display mode to hw. 149 * @commit: set current hw specific display mode to hw.
150 * @disable: disable hardware specific display mode.
149 * @enable_vblank: specific driver callback for enabling vblank interrupt. 151 * @enable_vblank: specific driver callback for enabling vblank interrupt.
150 * @disable_vblank: specific driver callback for disabling vblank interrupt. 152 * @disable_vblank: specific driver callback for disabling vblank interrupt.
151 */ 153 */
152struct exynos_drm_manager_ops { 154struct exynos_drm_manager_ops {
153 void (*mode_set)(struct device *subdrv_dev, void *mode); 155 void (*mode_set)(struct device *subdrv_dev, void *mode);
154 void (*commit)(struct device *subdrv_dev); 156 void (*commit)(struct device *subdrv_dev);
157 void (*disable)(struct device *subdrv_dev);
155 int (*enable_vblank)(struct device *subdrv_dev); 158 int (*enable_vblank)(struct device *subdrv_dev);
156 void (*disable_vblank)(struct device *subdrv_dev); 159 void (*disable_vblank)(struct device *subdrv_dev);
157}; 160};
@@ -178,7 +181,7 @@ struct exynos_drm_manager {
178 int pipe; 181 int pipe;
179 struct exynos_drm_manager_ops *ops; 182 struct exynos_drm_manager_ops *ops;
180 struct exynos_drm_overlay_ops *overlay_ops; 183 struct exynos_drm_overlay_ops *overlay_ops;
181 struct exynos_drm_display *display; 184 struct exynos_drm_display_ops *display_ops;
182}; 185};
183 186
184/* 187/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 7cf6fa86a67e..153061415baf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -53,15 +53,36 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
53 struct drm_device *dev = encoder->dev; 53 struct drm_device *dev = encoder->dev;
54 struct drm_connector *connector; 54 struct drm_connector *connector;
55 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 55 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
56 struct exynos_drm_manager_ops *manager_ops = manager->ops;
56 57
57 DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode); 58 DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
58 59
60 switch (mode) {
61 case DRM_MODE_DPMS_ON:
62 if (manager_ops && manager_ops->commit)
63 manager_ops->commit(manager->dev);
64 break;
65 case DRM_MODE_DPMS_STANDBY:
66 case DRM_MODE_DPMS_SUSPEND:
67 case DRM_MODE_DPMS_OFF:
68 /* TODO */
69 if (manager_ops && manager_ops->disable)
70 manager_ops->disable(manager->dev);
71 break;
72 default:
73 DRM_ERROR("unspecified mode %d\n", mode);
74 break;
75 }
76
59 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 77 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
60 if (connector->encoder == encoder) { 78 if (connector->encoder == encoder) {
61 struct exynos_drm_display *display = manager->display; 79 struct exynos_drm_display_ops *display_ops =
80 manager->display_ops;
62 81
63 if (display && display->power_on) 82 DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
64 display->power_on(manager->dev, mode); 83 connector->base.id, mode);
84 if (display_ops && display_ops->power_on)
85 display_ops->power_on(manager->dev, mode);
65 } 86 }
66 } 87 }
67} 88}
@@ -116,15 +137,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
116{ 137{
117 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 138 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
118 struct exynos_drm_manager_ops *manager_ops = manager->ops; 139 struct exynos_drm_manager_ops *manager_ops = manager->ops;
119 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
120 140
121 DRM_DEBUG_KMS("%s\n", __FILE__); 141 DRM_DEBUG_KMS("%s\n", __FILE__);
122 142
123 if (manager_ops && manager_ops->commit) 143 if (manager_ops && manager_ops->commit)
124 manager_ops->commit(manager->dev); 144 manager_ops->commit(manager->dev);
125
126 if (overlay_ops && overlay_ops->commit)
127 overlay_ops->commit(manager->dev);
128} 145}
129 146
130static struct drm_crtc * 147static struct drm_crtc *
@@ -208,10 +225,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
208{ 225{
209 struct drm_device *dev = crtc->dev; 226 struct drm_device *dev = crtc->dev;
210 struct drm_encoder *encoder; 227 struct drm_encoder *encoder;
228 struct exynos_drm_private *private = dev->dev_private;
229 struct exynos_drm_manager *manager;
211 230
212 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 231 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
213 if (encoder->crtc != crtc) 232 /*
214 continue; 233 * if crtc is detached from encoder, check pipe,
234 * otherwise check crtc attached to encoder
235 */
236 if (!encoder->crtc) {
237 manager = to_exynos_encoder(encoder)->manager;
238 if (manager->pipe < 0 ||
239 private->crtc[manager->pipe] != crtc)
240 continue;
241 } else {
242 if (encoder->crtc != crtc)
243 continue;
244 }
215 245
216 fn(encoder, data); 246 fn(encoder, data);
217 } 247 }
@@ -250,8 +280,18 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
250 struct exynos_drm_manager *manager = 280 struct exynos_drm_manager *manager =
251 to_exynos_encoder(encoder)->manager; 281 to_exynos_encoder(encoder)->manager;
252 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; 282 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
283 int crtc = *(int *)data;
284
285 DRM_DEBUG_KMS("%s\n", __FILE__);
286
287 /*
288 * when crtc is detached from encoder, this pipe is used
289 * to select manager operation
290 */
291 manager->pipe = crtc;
253 292
254 overlay_ops->commit(manager->dev); 293 if (overlay_ops && overlay_ops->commit)
294 overlay_ops->commit(manager->dev);
255} 295}
256 296
257void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) 297void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -261,7 +301,28 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
261 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; 301 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
262 struct exynos_drm_overlay *overlay = data; 302 struct exynos_drm_overlay *overlay = data;
263 303
264 overlay_ops->mode_set(manager->dev, overlay); 304 if (overlay_ops && overlay_ops->mode_set)
305 overlay_ops->mode_set(manager->dev, overlay);
306}
307
308void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
309{
310 struct exynos_drm_manager *manager =
311 to_exynos_encoder(encoder)->manager;
312 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
313
314 DRM_DEBUG_KMS("\n");
315
316 if (overlay_ops && overlay_ops->disable)
317 overlay_ops->disable(manager->dev);
318
319 /*
320 * crtc is already detached from encoder and last
321 * function for detaching is properly done, so
322 * clear pipe from manager to prevent repeated call
323 */
324 if (!encoder->crtc)
325 manager->pipe = -1;
265} 326}
266 327
267MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); 328MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
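
The encoder dpms path above now dispatches through manager_ops and checks each callback for NULL before calling it, so a driver may leave hooks such as disable unimplemented. A compact sketch of that optional-ops-table pattern follows; the types, the lcd_ops instance and the enum are invented for the example.

#include <stdio.h>
#include <stddef.h>

struct manager_ops {
	void (*commit)(void *dev);
	void (*disable)(void *dev);	/* optional: may be NULL */
};

static void lcd_commit(void *dev) { (void)dev; puts("commit"); }

static const struct manager_ops lcd_ops = {
	.commit  = lcd_commit,
	.disable = NULL,		/* this driver has no disable hook */
};

enum dpms { DPMS_ON, DPMS_OFF };

static void encoder_dpms(const struct manager_ops *ops, void *dev, enum dpms mode)
{
	switch (mode) {
	case DPMS_ON:
		if (ops && ops->commit)		/* guard every optional hook */
			ops->commit(dev);
		break;
	case DPMS_OFF:
		if (ops && ops->disable)
			ops->disable(dev);	/* silently skipped when NULL */
		break;
	}
}

int main(void)
{
	encoder_dpms(&lcd_ops, NULL, DPMS_ON);	/* prints "commit" */
	encoder_dpms(&lcd_ops, NULL, DPMS_OFF);	/* no-op, no crash */
	return 0;
}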
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 5ecd645d06a9..a22acfbf0e4e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -41,5 +41,6 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
41void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data); 41void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
42void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data); 42void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
43void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data); 43void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
44void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
44 45
45#endif 46#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 48d29cfd5240..5bf4a1ac7f82 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -29,7 +29,9 @@
29#include "drmP.h" 29#include "drmP.h"
30#include "drm_crtc.h" 30#include "drm_crtc.h"
31#include "drm_crtc_helper.h" 31#include "drm_crtc_helper.h"
32#include "drm_fb_helper.h"
32 33
34#include "exynos_drm_drv.h"
33#include "exynos_drm_fb.h" 35#include "exynos_drm_fb.h"
34#include "exynos_drm_buf.h" 36#include "exynos_drm_buf.h"
35#include "exynos_drm_gem.h" 37#include "exynos_drm_gem.h"
@@ -41,14 +43,14 @@
41 * 43 *
42 * @fb: drm framebuffer obejct. 44 * @fb: drm framebuffer obejct.
43 * @exynos_gem_obj: exynos specific gem object containing a gem object. 45 * @exynos_gem_obj: exynos specific gem object containing a gem object.
44 * @entry: pointer to exynos drm buffer entry object. 46 * @buffer: pointer to exynos_drm_gem_buffer object.
45 * - containing only the information to physically continuous memory 47 * - contain the memory information to memory region allocated
46 * region allocated at default framebuffer creation. 48 * at default framebuffer creation.
47 */ 49 */
48struct exynos_drm_fb { 50struct exynos_drm_fb {
49 struct drm_framebuffer fb; 51 struct drm_framebuffer fb;
50 struct exynos_drm_gem_obj *exynos_gem_obj; 52 struct exynos_drm_gem_obj *exynos_gem_obj;
51 struct exynos_drm_buf_entry *entry; 53 struct exynos_drm_gem_buf *buffer;
52}; 54};
53 55
54static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) 56static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -63,8 +65,8 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
63 * default framebuffer has no gem object so 65 * default framebuffer has no gem object so
64 * a buffer of the default framebuffer should be released at here. 66 * a buffer of the default framebuffer should be released at here.
65 */ 67 */
66 if (!exynos_fb->exynos_gem_obj && exynos_fb->entry) 68 if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
67 exynos_drm_buf_destroy(fb->dev, exynos_fb->entry); 69 exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
68 70
69 kfree(exynos_fb); 71 kfree(exynos_fb);
70 exynos_fb = NULL; 72 exynos_fb = NULL;
@@ -143,29 +145,29 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
143 */ 145 */
144 if (!mode_cmd->handle) { 146 if (!mode_cmd->handle) {
145 if (!file_priv) { 147 if (!file_priv) {
146 struct exynos_drm_buf_entry *entry; 148 struct exynos_drm_gem_buf *buffer;
147 149
148 /* 150 /*
149 * in case that file_priv is NULL, it allocates 151 * in case that file_priv is NULL, it allocates
150 * only buffer and this buffer would be used 152 * only buffer and this buffer would be used
151 * for default framebuffer. 153 * for default framebuffer.
152 */ 154 */
153 entry = exynos_drm_buf_create(dev, size); 155 buffer = exynos_drm_buf_create(dev, size);
154 if (IS_ERR(entry)) { 156 if (IS_ERR(buffer)) {
155 ret = PTR_ERR(entry); 157 ret = PTR_ERR(buffer);
156 goto err_buffer; 158 goto err_buffer;
157 } 159 }
158 160
159 exynos_fb->entry = entry; 161 exynos_fb->buffer = buffer;
160 162
161 DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n", 163 DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
162 (unsigned long)entry->paddr, size); 164 (unsigned long)buffer->dma_addr, size);
163 165
164 goto out; 166 goto out;
165 } else { 167 } else {
166 exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, 168 exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
167 size, 169 &mode_cmd->handle,
168 &mode_cmd->handle); 170 size);
169 if (IS_ERR(exynos_gem_obj)) { 171 if (IS_ERR(exynos_gem_obj)) {
170 ret = PTR_ERR(exynos_gem_obj); 172 ret = PTR_ERR(exynos_gem_obj);
171 goto err_buffer; 173 goto err_buffer;
@@ -189,10 +191,10 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
189 * so that default framebuffer has no its own gem object, 191 * so that default framebuffer has no its own gem object,
190 * only its own buffer object. 192 * only its own buffer object.
191 */ 193 */
192 exynos_fb->entry = exynos_gem_obj->entry; 194 exynos_fb->buffer = exynos_gem_obj->buffer;
193 195
194 DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n", 196 DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
195 (unsigned long)exynos_fb->entry->paddr, size, 197 (unsigned long)exynos_fb->buffer->dma_addr, size,
196 (unsigned int)&exynos_gem_obj->base); 198 (unsigned int)&exynos_gem_obj->base);
197 199
198out: 200out:
@@ -220,26 +222,36 @@ struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
220 return exynos_drm_fb_init(file_priv, dev, mode_cmd); 222 return exynos_drm_fb_init(file_priv, dev, mode_cmd);
221} 223}
222 224
223struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb) 225struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
224{ 226{
225 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 227 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
226 struct exynos_drm_buf_entry *entry; 228 struct exynos_drm_gem_buf *buffer;
227 229
228 DRM_DEBUG_KMS("%s\n", __FILE__); 230 DRM_DEBUG_KMS("%s\n", __FILE__);
229 231
230 entry = exynos_fb->entry; 232 buffer = exynos_fb->buffer;
231 if (!entry) 233 if (!buffer)
232 return NULL; 234 return NULL;
233 235
234 DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", 236 DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
235 (unsigned long)entry->vaddr, 237 (unsigned long)buffer->kvaddr,
236 (unsigned long)entry->paddr); 238 (unsigned long)buffer->dma_addr);
237 239
238 return entry; 240 return buffer;
241}
242
243static void exynos_drm_output_poll_changed(struct drm_device *dev)
244{
245 struct exynos_drm_private *private = dev->dev_private;
246 struct drm_fb_helper *fb_helper = private->fb_helper;
247
248 if (fb_helper)
249 drm_fb_helper_hotplug_event(fb_helper);
239} 250}
240 251
241static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { 252static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
242 .fb_create = exynos_drm_fb_create, 253 .fb_create = exynos_drm_fb_create,
254 .output_poll_changed = exynos_drm_output_poll_changed,
243}; 255};
244 256
245void exynos_drm_mode_config_init(struct drm_device *dev) 257void exynos_drm_mode_config_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 81fba29b696d..f79f768a56ca 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -33,6 +33,7 @@
33 33
34#include "exynos_drm_drv.h" 34#include "exynos_drm_drv.h"
35#include "exynos_drm_fb.h" 35#include "exynos_drm_fb.h"
36#include "exynos_drm_gem.h"
36#include "exynos_drm_buf.h" 37#include "exynos_drm_buf.h"
37 38
38#define MAX_CONNECTOR 4 39#define MAX_CONNECTOR 4
@@ -85,15 +86,13 @@ static struct fb_ops exynos_drm_fb_ops = {
85}; 86};
86 87
87static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, 88static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
88 struct drm_framebuffer *fb, 89 struct drm_framebuffer *fb)
89 unsigned int fb_width,
90 unsigned int fb_height)
91{ 90{
92 struct fb_info *fbi = helper->fbdev; 91 struct fb_info *fbi = helper->fbdev;
93 struct drm_device *dev = helper->dev; 92 struct drm_device *dev = helper->dev;
94 struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper); 93 struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
95 struct exynos_drm_buf_entry *entry; 94 struct exynos_drm_gem_buf *buffer;
96 unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3); 95 unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
97 unsigned long offset; 96 unsigned long offset;
98 97
99 DRM_DEBUG_KMS("%s\n", __FILE__); 98 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -101,20 +100,20 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
101 exynos_fb->fb = fb; 100 exynos_fb->fb = fb;
102 101
103 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); 102 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
104 drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height); 103 drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
105 104
106 entry = exynos_drm_fb_get_buf(fb); 105 buffer = exynos_drm_fb_get_buf(fb);
107 if (!entry) { 106 if (!buffer) {
108 DRM_LOG_KMS("entry is null.\n"); 107 DRM_LOG_KMS("buffer is null.\n");
109 return -EFAULT; 108 return -EFAULT;
110 } 109 }
111 110
112 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); 111 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
113 offset += fbi->var.yoffset * fb->pitches[0]; 112 offset += fbi->var.yoffset * fb->pitches[0];
114 113
115 dev->mode_config.fb_base = entry->paddr; 114 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
116 fbi->screen_base = entry->vaddr + offset; 115 fbi->screen_base = buffer->kvaddr + offset;
117 fbi->fix.smem_start = entry->paddr + offset; 116 fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
118 fbi->screen_size = size; 117 fbi->screen_size = size;
119 fbi->fix.smem_len = size; 118 fbi->fix.smem_len = size;
120 119
@@ -171,8 +170,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
171 goto out; 170 goto out;
172 } 171 }
173 172
174 ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, 173 ret = exynos_drm_fbdev_update(helper, helper->fb);
175 sizes->fb_height);
176 if (ret < 0) 174 if (ret < 0)
177 fb_dealloc_cmap(&fbi->cmap); 175 fb_dealloc_cmap(&fbi->cmap);
178 176
@@ -235,8 +233,7 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
235 } 233 }
236 234
237 helper->fb = exynos_fbdev->fb; 235 helper->fb = exynos_fbdev->fb;
238 return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, 236 return exynos_drm_fbdev_update(helper, helper->fb);
239 sizes->fb_height);
240} 237}
241 238
242static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper, 239static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
@@ -405,6 +402,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
405 fb_helper = private->fb_helper; 402 fb_helper = private->fb_helper;
406 403
407 if (fb_helper) { 404 if (fb_helper) {
405 struct list_head temp_list;
406
407 INIT_LIST_HEAD(&temp_list);
408
409 /*
 410 * fb_helper is reinitialized but the kernel fb is reused,
 411 * so kernel_fb_list needs to be backed up and restored
412 */
413 if (!list_empty(&fb_helper->kernel_fb_list))
414 list_replace_init(&fb_helper->kernel_fb_list,
415 &temp_list);
416
408 drm_fb_helper_fini(fb_helper); 417 drm_fb_helper_fini(fb_helper);
409 418
410 ret = drm_fb_helper_init(dev, fb_helper, 419 ret = drm_fb_helper_init(dev, fb_helper,
@@ -414,6 +423,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
414 return ret; 423 return ret;
415 } 424 }
416 425
426 if (!list_empty(&temp_list))
427 list_replace(&temp_list, &fb_helper->kernel_fb_list);
428
417 ret = drm_fb_helper_single_add_all_connectors(fb_helper); 429 ret = drm_fb_helper_single_add_all_connectors(fb_helper);
418 if (ret < 0) { 430 if (ret < 0) {
419 DRM_ERROR("failed to add fb helper to connectors\n"); 431 DRM_ERROR("failed to add fb helper to connectors\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 4659c88cdd9b..db3b3d9e731d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -64,7 +64,7 @@ struct fimd_win_data {
64 unsigned int fb_width; 64 unsigned int fb_width;
65 unsigned int fb_height; 65 unsigned int fb_height;
66 unsigned int bpp; 66 unsigned int bpp;
67 dma_addr_t paddr; 67 dma_addr_t dma_addr;
68 void __iomem *vaddr; 68 void __iomem *vaddr;
69 unsigned int buf_offsize; 69 unsigned int buf_offsize;
70 unsigned int line_size; /* bytes */ 70 unsigned int line_size; /* bytes */
@@ -124,7 +124,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
124 return 0; 124 return 0;
125} 125}
126 126
127static struct exynos_drm_display fimd_display = { 127static struct exynos_drm_display_ops fimd_display_ops = {
128 .type = EXYNOS_DISPLAY_TYPE_LCD, 128 .type = EXYNOS_DISPLAY_TYPE_LCD,
129 .is_connected = fimd_display_is_connected, 129 .is_connected = fimd_display_is_connected,
130 .get_timing = fimd_get_timing, 130 .get_timing = fimd_get_timing,
@@ -177,6 +177,40 @@ static void fimd_commit(struct device *dev)
177 writel(val, ctx->regs + VIDCON0); 177 writel(val, ctx->regs + VIDCON0);
178} 178}
179 179
180static void fimd_disable(struct device *dev)
181{
182 struct fimd_context *ctx = get_fimd_context(dev);
183 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
184 struct drm_device *drm_dev = subdrv->drm_dev;
185 struct exynos_drm_manager *manager = &subdrv->manager;
186 u32 val;
187
188 DRM_DEBUG_KMS("%s\n", __FILE__);
189
190 /* fimd dma off */
191 val = readl(ctx->regs + VIDCON0);
192 val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
193 writel(val, ctx->regs + VIDCON0);
194
195 /*
196 * if vblank is enabled status with dma off then
197 * it disables vsync interrupt.
198 */
199 if (drm_dev->vblank_enabled[manager->pipe] &&
200 atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
201 drm_vblank_put(drm_dev, manager->pipe);
202
203 /*
204 * if vblank_disable_allowed is 0 then disable
205 * vsync interrupt right now else the vsync interrupt
206 * would be disabled by drm timer once a current process
 207 * gives up ownership of the vblank event.
208 */
209 if (!drm_dev->vblank_disable_allowed)
210 drm_vblank_off(drm_dev, manager->pipe);
211 }
212}
213
180static int fimd_enable_vblank(struct device *dev) 214static int fimd_enable_vblank(struct device *dev)
181{ 215{
182 struct fimd_context *ctx = get_fimd_context(dev); 216 struct fimd_context *ctx = get_fimd_context(dev);
@@ -220,6 +254,7 @@ static void fimd_disable_vblank(struct device *dev)
220 254
221static struct exynos_drm_manager_ops fimd_manager_ops = { 255static struct exynos_drm_manager_ops fimd_manager_ops = {
222 .commit = fimd_commit, 256 .commit = fimd_commit,
257 .disable = fimd_disable,
223 .enable_vblank = fimd_enable_vblank, 258 .enable_vblank = fimd_enable_vblank,
224 .disable_vblank = fimd_disable_vblank, 259 .disable_vblank = fimd_disable_vblank,
225}; 260};
@@ -251,7 +286,7 @@ static void fimd_win_mode_set(struct device *dev,
251 win_data->ovl_height = overlay->crtc_height; 286 win_data->ovl_height = overlay->crtc_height;
252 win_data->fb_width = overlay->fb_width; 287 win_data->fb_width = overlay->fb_width;
253 win_data->fb_height = overlay->fb_height; 288 win_data->fb_height = overlay->fb_height;
254 win_data->paddr = overlay->paddr + offset; 289 win_data->dma_addr = overlay->dma_addr + offset;
255 win_data->vaddr = overlay->vaddr + offset; 290 win_data->vaddr = overlay->vaddr + offset;
256 win_data->bpp = overlay->bpp; 291 win_data->bpp = overlay->bpp;
257 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * 292 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
@@ -263,7 +298,7 @@ static void fimd_win_mode_set(struct device *dev,
263 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 298 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
264 win_data->ovl_width, win_data->ovl_height); 299 win_data->ovl_width, win_data->ovl_height);
265 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", 300 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
266 (unsigned long)win_data->paddr, 301 (unsigned long)win_data->dma_addr,
267 (unsigned long)win_data->vaddr); 302 (unsigned long)win_data->vaddr);
268 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", 303 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
269 overlay->fb_width, overlay->crtc_width); 304 overlay->fb_width, overlay->crtc_width);
@@ -376,16 +411,16 @@ static void fimd_win_commit(struct device *dev)
376 writel(val, ctx->regs + SHADOWCON); 411 writel(val, ctx->regs + SHADOWCON);
377 412
378 /* buffer start address */ 413 /* buffer start address */
379 val = win_data->paddr; 414 val = (unsigned long)win_data->dma_addr;
380 writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); 415 writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
381 416
382 /* buffer end address */ 417 /* buffer end address */
383 size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); 418 size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
384 val = win_data->paddr + size; 419 val = (unsigned long)(win_data->dma_addr + size);
385 writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); 420 writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
386 421
387 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", 422 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
388 (unsigned long)win_data->paddr, val, size); 423 (unsigned long)win_data->dma_addr, val, size);
389 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 424 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
390 win_data->ovl_width, win_data->ovl_height); 425 win_data->ovl_width, win_data->ovl_height);
391 426
@@ -447,7 +482,6 @@ static void fimd_win_commit(struct device *dev)
447static void fimd_win_disable(struct device *dev) 482static void fimd_win_disable(struct device *dev)
448{ 483{
449 struct fimd_context *ctx = get_fimd_context(dev); 484 struct fimd_context *ctx = get_fimd_context(dev);
450 struct fimd_win_data *win_data;
451 int win = ctx->default_win; 485 int win = ctx->default_win;
452 u32 val; 486 u32 val;
453 487
@@ -456,8 +490,6 @@ static void fimd_win_disable(struct device *dev)
456 if (win < 0 || win > WINDOWS_NR) 490 if (win < 0 || win > WINDOWS_NR)
457 return; 491 return;
458 492
459 win_data = &ctx->win_data[win];
460
461 /* protect windows */ 493 /* protect windows */
462 val = readl(ctx->regs + SHADOWCON); 494 val = readl(ctx->regs + SHADOWCON);
463 val |= SHADOWCON_WINx_PROTECT(win); 495 val |= SHADOWCON_WINx_PROTECT(win);
@@ -528,6 +560,16 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
528 /* VSYNC interrupt */ 560 /* VSYNC interrupt */
529 writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1); 561 writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
530 562
563 /*
 564	 * If vblank_disable_allowed is 1, the disable callback does not
 565	 * switch the vsync interrupt off immediately, so an interrupt can
 566	 * still arrive while manager->pipe is -1; the drm timer will
 567	 * disable it later.  Ignore such spurious interrupts instead of
 568	 * handling a vblank for an invalid pipe.
569 */
570 if (manager->pipe == -1)
571 return IRQ_HANDLED;
572
531 drm_handle_vblank(drm_dev, manager->pipe); 573 drm_handle_vblank(drm_dev, manager->pipe);
532 fimd_finish_pageflip(drm_dev, manager->pipe); 574 fimd_finish_pageflip(drm_dev, manager->pipe);
533 575
@@ -548,13 +590,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
548 */ 590 */
549 drm_dev->irq_enabled = 1; 591 drm_dev->irq_enabled = 1;
550 592
551 /*
552 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
553 * by drm timer once a current process gives up ownership of
554 * vblank event.(drm_vblank_put function was called)
555 */
556 drm_dev->vblank_disable_allowed = 1;
557
558 return 0; 593 return 0;
559} 594}
560 595
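
The hunks above add a guard so the FIMD VSYNC handler simply acknowledges interrupts that arrive while the manager is not yet bound to a CRTC (manager->pipe == -1), now that vblank_disable_allowed no longer forces the interrupt off immediately. A minimal standalone sketch of that guard; the fake_manager and handle_vsync names and stub types are illustrative, not the driver's:

/*
 * Sketch of the guard added to fimd_irq_handler(): acknowledge a VSYNC
 * interrupt that fires while the manager is not bound to a CRTC pipe,
 * instead of handling a vblank for pipe -1.
 */
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

struct fake_manager { int pipe; };      /* stand-in for exynos_drm_manager */

static enum irqreturn handle_vsync(struct fake_manager *manager)
{
        /* pipe == -1: no CRTC attached yet, drop the event */
        if (manager->pipe == -1)
                return IRQ_HANDLED;

        printf("vblank on pipe %d\n", manager->pipe);
        return IRQ_HANDLED;
}

int main(void)
{
        struct fake_manager m = { .pipe = -1 };

        handle_vsync(&m);       /* ignored: not bound yet */
        m.pipe = 0;
        handle_vsync(&m);       /* delivered */
        return 0;
}
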
@@ -731,7 +766,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
731 subdrv->manager.pipe = -1; 766 subdrv->manager.pipe = -1;
732 subdrv->manager.ops = &fimd_manager_ops; 767 subdrv->manager.ops = &fimd_manager_ops;
733 subdrv->manager.overlay_ops = &fimd_overlay_ops; 768 subdrv->manager.overlay_ops = &fimd_overlay_ops;
734 subdrv->manager.display = &fimd_display; 769 subdrv->manager.display_ops = &fimd_display_ops;
735 subdrv->manager.dev = dev; 770 subdrv->manager.dev = dev;
736 771
737 platform_set_drvdata(pdev, ctx); 772 platform_set_drvdata(pdev, ctx);
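
In the window-commit hunks the overlay data switches from a raw paddr to dma_addr_t, which can be wider than the 32-bit VIDWx_BUF_START/END registers, hence the explicit casts before the writel() calls. A small sketch of that truncating cast, with dma_addr_t typedef'd only so the example builds standalone:

/*
 * Why the driver now casts win_data->dma_addr before programming the
 * buffer start/end registers.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;    /* kernel type, stubbed for the example */

static void write_buf_regs(dma_addr_t dma_addr, unsigned long size)
{
        unsigned long start = (unsigned long)dma_addr;
        unsigned long end   = (unsigned long)(dma_addr + size);

        /* in the driver these land in ctx->regs + VIDWx_BUF_START/END */
        printf("start=0x%lx end=0x%lx size=0x%lx\n", start, end, size);
}

int main(void)
{
        write_buf_regs(0x20000000, 4 * 1024 * 1024);
        return 0;
}
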
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index a8e7a88906ed..aba0fe47f7ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -62,40 +62,28 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
62 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT; 62 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
63} 63}
64 64
65struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, 65static struct exynos_drm_gem_obj
66 struct drm_device *dev, unsigned int size, 66 *exynos_drm_gem_init(struct drm_device *drm_dev,
67 unsigned int *handle) 67 struct drm_file *file_priv, unsigned int *handle,
68 unsigned int size)
68{ 69{
69 struct exynos_drm_gem_obj *exynos_gem_obj; 70 struct exynos_drm_gem_obj *exynos_gem_obj;
70 struct exynos_drm_buf_entry *entry;
71 struct drm_gem_object *obj; 71 struct drm_gem_object *obj;
72 int ret; 72 int ret;
73 73
74 DRM_DEBUG_KMS("%s\n", __FILE__);
75
76 size = roundup(size, PAGE_SIZE);
77
78 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); 74 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
79 if (!exynos_gem_obj) { 75 if (!exynos_gem_obj) {
80 DRM_ERROR("failed to allocate exynos gem object.\n"); 76 DRM_ERROR("failed to allocate exynos gem object.\n");
81 return ERR_PTR(-ENOMEM); 77 return ERR_PTR(-ENOMEM);
82 } 78 }
83 79
84 /* allocate the new buffer object and memory region. */
85 entry = exynos_drm_buf_create(dev, size);
86 if (!entry) {
87 kfree(exynos_gem_obj);
88 return ERR_PTR(-ENOMEM);
89 }
90
91 exynos_gem_obj->entry = entry;
92
93 obj = &exynos_gem_obj->base; 80 obj = &exynos_gem_obj->base;
94 81
95 ret = drm_gem_object_init(dev, obj, size); 82 ret = drm_gem_object_init(drm_dev, obj, size);
96 if (ret < 0) { 83 if (ret < 0) {
97 DRM_ERROR("failed to initailize gem object.\n"); 84 DRM_ERROR("failed to initialize gem object.\n");
98 goto err_obj_init; 85 ret = -EINVAL;
86 goto err_object_init;
99 } 87 }
100 88
101 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); 89 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -127,24 +115,50 @@ err_handle_create:
127err_create_mmap_offset: 115err_create_mmap_offset:
128 drm_gem_object_release(obj); 116 drm_gem_object_release(obj);
129 117
130err_obj_init: 118err_object_init:
131 exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
132
133 kfree(exynos_gem_obj); 119 kfree(exynos_gem_obj);
134 120
135 return ERR_PTR(ret); 121 return ERR_PTR(ret);
136} 122}
137 123
124struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
125 struct drm_file *file_priv,
126 unsigned int *handle, unsigned long size)
127{
128
129 struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
130 struct exynos_drm_gem_buf *buffer;
131
132 size = roundup(size, PAGE_SIZE);
133
134 DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
135
136 buffer = exynos_drm_buf_create(dev, size);
137 if (IS_ERR(buffer)) {
138 return ERR_CAST(buffer);
139 }
140
141 exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
142 if (IS_ERR(exynos_gem_obj)) {
143 exynos_drm_buf_destroy(dev, buffer);
144 return exynos_gem_obj;
145 }
146
147 exynos_gem_obj->buffer = buffer;
148
149 return exynos_gem_obj;
150}
151
138int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, 152int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
139 struct drm_file *file_priv) 153 struct drm_file *file_priv)
140{ 154{
141 struct drm_exynos_gem_create *args = data; 155 struct drm_exynos_gem_create *args = data;
142 struct exynos_drm_gem_obj *exynos_gem_obj; 156 struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
143 157
144 DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size); 158 DRM_DEBUG_KMS("%s\n", __FILE__);
145 159
146 exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, 160 exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
147 &args->handle); 161 &args->handle, args->size);
148 if (IS_ERR(exynos_gem_obj)) 162 if (IS_ERR(exynos_gem_obj))
149 return PTR_ERR(exynos_gem_obj); 163 return PTR_ERR(exynos_gem_obj);
150 164
@@ -175,7 +189,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
175{ 189{
176 struct drm_gem_object *obj = filp->private_data; 190 struct drm_gem_object *obj = filp->private_data;
177 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 191 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
178 struct exynos_drm_buf_entry *entry; 192 struct exynos_drm_gem_buf *buffer;
179 unsigned long pfn, vm_size; 193 unsigned long pfn, vm_size;
180 194
181 DRM_DEBUG_KMS("%s\n", __FILE__); 195 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -187,20 +201,20 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
187 201
188 vm_size = vma->vm_end - vma->vm_start; 202 vm_size = vma->vm_end - vma->vm_start;
189 /* 203 /*
190 * a entry contains information to physically continuous memory 204 * a buffer contains information to physically continuous memory
191 * allocated by user request or at framebuffer creation. 205 * allocated by user request or at framebuffer creation.
192 */ 206 */
193 entry = exynos_gem_obj->entry; 207 buffer = exynos_gem_obj->buffer;
194 208
195 /* check if user-requested size is valid. */ 209 /* check if user-requested size is valid. */
196 if (vm_size > entry->size) 210 if (vm_size > buffer->size)
197 return -EINVAL; 211 return -EINVAL;
198 212
199 /* 213 /*
200 * get page frame number to physical memory to be mapped 214 * get page frame number to physical memory to be mapped
201 * to user space. 215 * to user space.
202 */ 216 */
203 pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT; 217 pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
204 218
205 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn); 219 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
206 220
@@ -281,7 +295,7 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
281 295
282 exynos_gem_obj = to_exynos_gem_obj(gem_obj); 296 exynos_gem_obj = to_exynos_gem_obj(gem_obj);
283 297
284 exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry); 298 exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
285 299
286 kfree(exynos_gem_obj); 300 kfree(exynos_gem_obj);
287} 301}
@@ -302,8 +316,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
302 args->pitch = args->width * args->bpp >> 3; 316 args->pitch = args->width * args->bpp >> 3;
303 args->size = args->pitch * args->height; 317 args->size = args->pitch * args->height;
304 318
305 exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, 319 exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
306 &args->handle); 320 args->size);
307 if (IS_ERR(exynos_gem_obj)) 321 if (IS_ERR(exynos_gem_obj))
308 return PTR_ERR(exynos_gem_obj); 322 return PTR_ERR(exynos_gem_obj);
309 323
@@ -360,7 +374,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
360 374
361 mutex_lock(&dev->struct_mutex); 375 mutex_lock(&dev->struct_mutex);
362 376
363 pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset; 377 pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
378 PAGE_SHIFT) + page_offset;
364 379
365 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); 380 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
366 381
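
The GEM rework above splits creation into two steps: allocate the backing buffer, then initialize the GEM object, unwinding the buffer if the second step fails. A standalone sketch of that ordering; gem_buf_create() and gem_obj_init() are placeholders for exynos_drm_buf_create() and exynos_drm_gem_init():

#include <stdio.h>
#include <stdlib.h>

struct gem_buf { unsigned long size; };
struct gem_obj { struct gem_buf *buffer; };

static struct gem_buf *gem_buf_create(unsigned long size)
{
        struct gem_buf *buf = calloc(1, sizeof(*buf));

        if (buf)
                buf->size = size;
        return buf;
}

static struct gem_obj *gem_obj_init(void)
{
        return calloc(1, sizeof(struct gem_obj));
}

static struct gem_obj *gem_create(unsigned long size)
{
        struct gem_buf *buf;
        struct gem_obj *obj;

        buf = gem_buf_create(size);
        if (!buf)
                return NULL;

        obj = gem_obj_init();
        if (!obj) {
                free(buf);              /* unwind step one on failure */
                return NULL;
        }

        obj->buffer = buf;
        return obj;
}

int main(void)
{
        struct gem_obj *obj = gem_create(4096);

        if (obj)
                printf("created object with %lu byte buffer\n",
                       obj->buffer->size);
        return 0;
}
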
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e5fc0148277b..ef8797334e6d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -30,13 +30,29 @@
30 struct exynos_drm_gem_obj, base) 30 struct exynos_drm_gem_obj, base)
31 31
32/* 32/*
33 * exynos drm gem buffer structure.
34 *
35 * @kvaddr: kernel virtual address of the allocated memory region.
36 * @dma_addr: bus address (as seen by the DMA engine) of the allocated
37 * memory region - a physical address without an IOMMU, or a
38 * device address when an IOMMU is in use.
39 * @size: size of the allocated memory region.
40 */
41struct exynos_drm_gem_buf {
42 void __iomem *kvaddr;
43 dma_addr_t dma_addr;
44 unsigned long size;
45};
46
47/*
33 * exynos drm buffer structure. 48 * exynos drm buffer structure.
34 * 49 *
35 * @base: a gem object. 50 * @base: a gem object.
36 * - a new handle to this gem object would be created 51 * - a new handle to this gem object would be created
37 * by drm_gem_handle_create(). 52 * by drm_gem_handle_create().
38 * @entry: pointer to exynos drm buffer entry object. 53 * @buffer: a pointer to exynos_drm_gem_buffer object.
39 * - containing the information to physically 54 * - contain the information to memory region allocated
55 * by user request or at framebuffer creation.
40 * continuous memory region allocated by user request 56 * continuous memory region allocated by user request
41 * or at framebuffer creation. 57 * or at framebuffer creation.
42 * 58 *
@@ -45,13 +61,13 @@
45 */ 61 */
46struct exynos_drm_gem_obj { 62struct exynos_drm_gem_obj {
47 struct drm_gem_object base; 63 struct drm_gem_object base;
48 struct exynos_drm_buf_entry *entry; 64 struct exynos_drm_gem_buf *buffer;
49}; 65};
50 66
51/* create a new buffer and get a new gem handle. */ 67/* create a new buffer and get a new gem handle. */
52struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, 68struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
53 struct drm_device *dev, unsigned int size, 69 struct drm_file *file_priv,
54 unsigned int *handle); 70 unsigned int *handle, unsigned long size);
55 71
56/* 72/*
57 * request gem object creation and buffer allocation as the size 73 * request gem object creation and buffer allocation as the size
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d14b44e13f51..004b048c5192 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
62 const struct intel_device_info *info = INTEL_INFO(dev); 62 const struct intel_device_info *info = INTEL_INFO(dev);
63 63
64 seq_printf(m, "gen: %d\n", info->gen); 64 seq_printf(m, "gen: %d\n", info->gen);
65 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
65#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 66#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
66 B(is_mobile); 67 B(is_mobile);
67 B(is_i85x); 68 B(is_i85x);
@@ -636,11 +637,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
636 struct drm_device *dev = node->minor->dev; 637 struct drm_device *dev = node->minor->dev;
637 drm_i915_private_t *dev_priv = dev->dev_private; 638 drm_i915_private_t *dev_priv = dev->dev_private;
638 struct intel_ring_buffer *ring; 639 struct intel_ring_buffer *ring;
640 int ret;
639 641
640 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 642 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
641 if (ring->size == 0) 643 if (ring->size == 0)
642 return 0; 644 return 0;
643 645
646 ret = mutex_lock_interruptible(&dev->struct_mutex);
647 if (ret)
648 return ret;
649
644 seq_printf(m, "Ring %s:\n", ring->name); 650 seq_printf(m, "Ring %s:\n", ring->name);
645 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); 651 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
646 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); 652 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
@@ -654,6 +660,8 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
654 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); 660 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
655 seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); 661 seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
656 662
663 mutex_unlock(&dev->struct_mutex);
664
657 return 0; 665 return 0;
658} 666}
659 667
@@ -842,7 +850,16 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
842 struct drm_info_node *node = (struct drm_info_node *) m->private; 850 struct drm_info_node *node = (struct drm_info_node *) m->private;
843 struct drm_device *dev = node->minor->dev; 851 struct drm_device *dev = node->minor->dev;
844 drm_i915_private_t *dev_priv = dev->dev_private; 852 drm_i915_private_t *dev_priv = dev->dev_private;
845 u16 crstanddelay = I915_READ16(CRSTANDVID); 853 u16 crstanddelay;
854 int ret;
855
856 ret = mutex_lock_interruptible(&dev->struct_mutex);
857 if (ret)
858 return ret;
859
860 crstanddelay = I915_READ16(CRSTANDVID);
861
862 mutex_unlock(&dev->struct_mutex);
846 863
847 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 864 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
848 865
@@ -940,7 +957,11 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
940 struct drm_device *dev = node->minor->dev; 957 struct drm_device *dev = node->minor->dev;
941 drm_i915_private_t *dev_priv = dev->dev_private; 958 drm_i915_private_t *dev_priv = dev->dev_private;
942 u32 delayfreq; 959 u32 delayfreq;
943 int i; 960 int ret, i;
961
962 ret = mutex_lock_interruptible(&dev->struct_mutex);
963 if (ret)
964 return ret;
944 965
945 for (i = 0; i < 16; i++) { 966 for (i = 0; i < 16; i++) {
946 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 967 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -948,6 +969,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
948 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 969 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
949 } 970 }
950 971
972 mutex_unlock(&dev->struct_mutex);
973
951 return 0; 974 return 0;
952} 975}
953 976
@@ -962,13 +985,19 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
962 struct drm_device *dev = node->minor->dev; 985 struct drm_device *dev = node->minor->dev;
963 drm_i915_private_t *dev_priv = dev->dev_private; 986 drm_i915_private_t *dev_priv = dev->dev_private;
964 u32 inttoext; 987 u32 inttoext;
965 int i; 988 int ret, i;
989
990 ret = mutex_lock_interruptible(&dev->struct_mutex);
991 if (ret)
992 return ret;
966 993
967 for (i = 1; i <= 32; i++) { 994 for (i = 1; i <= 32; i++) {
968 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 995 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
969 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 996 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
970 } 997 }
971 998
999 mutex_unlock(&dev->struct_mutex);
1000
972 return 0; 1001 return 0;
973} 1002}
974 1003
@@ -977,9 +1006,19 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
977 struct drm_info_node *node = (struct drm_info_node *) m->private; 1006 struct drm_info_node *node = (struct drm_info_node *) m->private;
978 struct drm_device *dev = node->minor->dev; 1007 struct drm_device *dev = node->minor->dev;
979 drm_i915_private_t *dev_priv = dev->dev_private; 1008 drm_i915_private_t *dev_priv = dev->dev_private;
980 u32 rgvmodectl = I915_READ(MEMMODECTL); 1009 u32 rgvmodectl, rstdbyctl;
981 u32 rstdbyctl = I915_READ(RSTDBYCTL); 1010 u16 crstandvid;
982 u16 crstandvid = I915_READ16(CRSTANDVID); 1011 int ret;
1012
1013 ret = mutex_lock_interruptible(&dev->struct_mutex);
1014 if (ret)
1015 return ret;
1016
1017 rgvmodectl = I915_READ(MEMMODECTL);
1018 rstdbyctl = I915_READ(RSTDBYCTL);
1019 crstandvid = I915_READ16(CRSTANDVID);
1020
1021 mutex_unlock(&dev->struct_mutex);
983 1022
984 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1023 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
985 "yes" : "no"); 1024 "yes" : "no");
@@ -1167,9 +1206,16 @@ static int i915_gfxec(struct seq_file *m, void *unused)
1167 struct drm_info_node *node = (struct drm_info_node *) m->private; 1206 struct drm_info_node *node = (struct drm_info_node *) m->private;
1168 struct drm_device *dev = node->minor->dev; 1207 struct drm_device *dev = node->minor->dev;
1169 drm_i915_private_t *dev_priv = dev->dev_private; 1208 drm_i915_private_t *dev_priv = dev->dev_private;
1209 int ret;
1210
1211 ret = mutex_lock_interruptible(&dev->struct_mutex);
1212 if (ret)
1213 return ret;
1170 1214
1171 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1215 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1172 1216
1217 mutex_unlock(&dev->struct_mutex);
1218
1173 return 0; 1219 return 0;
1174} 1220}
1175 1221
@@ -1506,7 +1552,10 @@ drm_add_fake_info_node(struct drm_minor *minor,
1506 node->minor = minor; 1552 node->minor = minor;
1507 node->dent = ent; 1553 node->dent = ent;
1508 node->info_ent = (void *) key; 1554 node->info_ent = (void *) key;
1509 list_add(&node->list, &minor->debugfs_nodes.list); 1555
1556 mutex_lock(&minor->debugfs_lock);
1557 list_add(&node->list, &minor->debugfs_list);
1558 mutex_unlock(&minor->debugfs_lock);
1510 1559
1511 return 0; 1560 return 0;
1512} 1561}
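
Several debugfs entries above now wrap their register reads in mutex_lock_interruptible(&dev->struct_mutex) and propagate the error if the wait is interrupted. A compilable sketch of the pattern with the lock and register access stubbed out (the register offset is arbitrary):

#include <stdio.h>

static int mutex_lock_interruptible_stub(void) { return 0; } /* -EINTR on signal in the kernel */
static void mutex_unlock_stub(void) { }
static unsigned int read_reg_stub(unsigned int reg) { return reg ^ 0x1f3f; }

static int show_delays(void)
{
        unsigned int crstanddelay;
        int ret;

        ret = mutex_lock_interruptible_stub();
        if (ret)
                return ret;             /* propagate -EINTR to the reader */

        crstanddelay = read_reg_stub(0x1234);

        mutex_unlock_stub();

        printf("w/ctx: %u, w/o ctx: %u\n",
               (crstanddelay >> 8) & 0x3f, crstanddelay & 0x3f);
        return 0;
}

int main(void)
{
        return show_delays();
}
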
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9533c54c93c..a9ae374861e7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1454 1454
1455 diff1 = now - dev_priv->last_time1; 1455 diff1 = now - dev_priv->last_time1;
1456 1456
1457 /* Prevent division-by-zero if we are asking too fast.
1458 * Also, we don't get interesting results if we are polling
1459 * faster than once in 10ms, so just return the saved value
1460 * in such cases.
1461 */
1462 if (diff1 <= 10)
1463 return dev_priv->chipset_power;
1464
1457 count1 = I915_READ(DMIEC); 1465 count1 = I915_READ(DMIEC);
1458 count2 = I915_READ(DDREC); 1466 count2 = I915_READ(DDREC);
1459 count3 = I915_READ(CSIEC); 1467 count3 = I915_READ(CSIEC);
@@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1484 dev_priv->last_count1 = total_count; 1492 dev_priv->last_count1 = total_count;
1485 dev_priv->last_time1 = now; 1493 dev_priv->last_time1 = now;
1486 1494
1495 dev_priv->chipset_power = ret;
1496
1487 return ret; 1497 return ret;
1488} 1498}
1489 1499
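
i915_chipset_val() now refuses to recompute when polled faster than every 10 ms, handing back the cached dev_priv->chipset_power instead and avoiding a division by a near-zero time delta. A simplified, standalone illustration using plain millisecond integers:

#include <stdio.h>

struct power_state {
        unsigned long last_time_ms;
        unsigned long cached_mw;
};

static unsigned long chipset_power(struct power_state *st,
                                   unsigned long now_ms,
                                   unsigned long energy_delta)
{
        unsigned long diff = now_ms - st->last_time_ms;

        if (diff <= 10)                 /* too soon: reuse the last sample */
                return st->cached_mw;

        st->cached_mw = energy_delta / diff;    /* safe: diff > 10 */
        st->last_time_ms = now_ms;
        return st->cached_mw;
}

int main(void)
{
        struct power_state st = { 0, 0 };

        printf("%lu\n", chipset_power(&st, 100, 5000)); /* fresh sample: 50 */
        printf("%lu\n", chipset_power(&st, 105, 9999)); /* cached: polled too fast */
        return 0;
}
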
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9f592703c369..22c8ab70db2c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,17 +58,17 @@ module_param_named(powersave, i915_powersave, int, 0600);
58MODULE_PARM_DESC(powersave, 58MODULE_PARM_DESC(powersave,
59 "Enable powersavings, fbc, downclocking, etc. (default: true)"); 59 "Enable powersavings, fbc, downclocking, etc. (default: true)");
60 60
61unsigned int i915_semaphores __read_mostly = 0; 61int i915_semaphores __read_mostly = -1;
62module_param_named(semaphores, i915_semaphores, int, 0600); 62module_param_named(semaphores, i915_semaphores, int, 0600);
63MODULE_PARM_DESC(semaphores, 63MODULE_PARM_DESC(semaphores,
64 "Use semaphores for inter-ring sync (default: false)"); 64 "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
65 65
66unsigned int i915_enable_rc6 __read_mostly = 0; 66int i915_enable_rc6 __read_mostly = -1;
67module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); 67module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
68MODULE_PARM_DESC(i915_enable_rc6, 68MODULE_PARM_DESC(i915_enable_rc6,
69 "Enable power-saving render C-state 6 (default: true)"); 69 "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
70 70
71unsigned int i915_enable_fbc __read_mostly = -1; 71int i915_enable_fbc __read_mostly = -1;
72module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); 72module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
73MODULE_PARM_DESC(i915_enable_fbc, 73MODULE_PARM_DESC(i915_enable_fbc,
74 "Enable frame buffer compression for power savings " 74 "Enable frame buffer compression for power savings "
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(lvds_downclock,
80 "Use panel (LVDS/eDP) downclocking for power savings " 80 "Use panel (LVDS/eDP) downclocking for power savings "
81 "(default: false)"); 81 "(default: false)");
82 82
83unsigned int i915_panel_use_ssc __read_mostly = -1; 83int i915_panel_use_ssc __read_mostly = -1;
84module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); 84module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
85MODULE_PARM_DESC(lvds_use_ssc, 85MODULE_PARM_DESC(lvds_use_ssc,
86 "Use Spread Spectrum Clock with panels [LVDS/eDP] " 86 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
@@ -107,7 +107,7 @@ static struct drm_driver driver;
107extern int intel_agp_enabled; 107extern int intel_agp_enabled;
108 108
109#define INTEL_VGA_DEVICE(id, info) { \ 109#define INTEL_VGA_DEVICE(id, info) { \
110 .class = PCI_CLASS_DISPLAY_VGA << 8, \ 110 .class = PCI_BASE_CLASS_DISPLAY << 16, \
111 .class_mask = 0xff0000, \ 111 .class_mask = 0xff0000, \
112 .vendor = 0x8086, \ 112 .vendor = 0x8086, \
113 .device = id, \ 113 .device = id, \
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
328 } 328 }
329} 329}
330 330
331static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 331void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
332{ 332{
333 int count; 333 int count;
334 334
@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
344 udelay(10); 344 udelay(10);
345} 345}
346 346
347void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
348{
349 int count;
350
351 count = 0;
352 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
353 udelay(10);
354
355 I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
356 POSTING_READ(FORCEWAKE_MT);
357
358 count = 0;
359 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
360 udelay(10);
361}
362
347/* 363/*
348 * Generally this is called implicitly by the register read function. However, 364 * Generally this is called implicitly by the register read function. However,
349 * if some sequence requires the GT to not power down then this function should 365 * if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
356 372
357 /* Forcewake is atomic in case we get in here without the lock */ 373 /* Forcewake is atomic in case we get in here without the lock */
358 if (atomic_add_return(1, &dev_priv->forcewake_count) == 1) 374 if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
359 __gen6_gt_force_wake_get(dev_priv); 375 dev_priv->display.force_wake_get(dev_priv);
360} 376}
361 377
362static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 378void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
363{ 379{
364 I915_WRITE_NOTRACE(FORCEWAKE, 0); 380 I915_WRITE_NOTRACE(FORCEWAKE, 0);
365 POSTING_READ(FORCEWAKE); 381 POSTING_READ(FORCEWAKE);
366} 382}
367 383
384void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
385{
386 I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
387 POSTING_READ(FORCEWAKE_MT);
388}
389
368/* 390/*
369 * see gen6_gt_force_wake_get() 391 * see gen6_gt_force_wake_get()
370 */ 392 */
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
373 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 395 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
374 396
375 if (atomic_dec_and_test(&dev_priv->forcewake_count)) 397 if (atomic_dec_and_test(&dev_priv->forcewake_count))
376 __gen6_gt_force_wake_put(dev_priv); 398 dev_priv->display.force_wake_put(dev_priv);
377} 399}
378 400
379void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) 401void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -804,8 +826,8 @@ static const struct file_operations i915_driver_fops = {
804}; 826};
805 827
806static struct drm_driver driver = { 828static struct drm_driver driver = {
807 /* don't use mtrr's here, the Xserver or user space app should 829 /* Don't use MTRRs here; the Xserver or userspace app should
808 * deal with them for intel hardware. 830 * deal with them for Intel hardware.
809 */ 831 */
810 .driver_features = 832 .driver_features =
811 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 833 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
@@ -904,8 +926,9 @@ MODULE_LICENSE("GPL and additional rights");
904/* We give fast paths for the really cool registers */ 926/* We give fast paths for the really cool registers */
905#define NEEDS_FORCE_WAKE(dev_priv, reg) \ 927#define NEEDS_FORCE_WAKE(dev_priv, reg) \
906 (((dev_priv)->info->gen >= 6) && \ 928 (((dev_priv)->info->gen >= 6) && \
907 ((reg) < 0x40000) && \ 929 ((reg) < 0x40000) && \
908 ((reg) != FORCEWAKE)) 930 ((reg) != FORCEWAKE) && \
931 ((reg) != ECOBUS))
909 932
910#define __i915_read(x, y) \ 933#define __i915_read(x, y) \
911u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ 934u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
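
The forcewake changes above route the low-level get/put through per-chip function pointers (stored in dev_priv->display) while the refcounted wrappers stay unchanged, so Ivybridge can select the multi-threaded FORCEWAKE_MT sequence at init time. A standalone sketch of that indirection; the printf bodies only label which register sequence would run, and the plain int counter stands in for the driver's atomic_t:

#include <stdio.h>

struct fw_ops {
        void (*get)(void);
        void (*put)(void);
};

static void fw_get_legacy(void) { printf("FORCEWAKE <- 1\n"); }
static void fw_put_legacy(void) { printf("FORCEWAKE <- 0\n"); }
static void fw_get_mt(void)     { printf("FORCEWAKE_MT <- (1<<16) | 1\n"); }
static void fw_put_mt(void)     { printf("FORCEWAKE_MT <- (1<<16) | 0\n"); }

static int forcewake_count;             /* atomic_t in the driver */

static void gt_force_wake_get(const struct fw_ops *ops)
{
        if (++forcewake_count == 1)     /* first user powers the GT up */
                ops->get();
}

static void gt_force_wake_put(const struct fw_ops *ops)
{
        if (--forcewake_count == 0)     /* last user lets it power down */
                ops->put();
}

int main(void)
{
        struct fw_ops legacy = { fw_get_legacy, fw_put_legacy };
        struct fw_ops mt     = { fw_get_mt, fw_put_mt };

        gt_force_wake_get(&legacy);
        gt_force_wake_put(&legacy);

        gt_force_wake_get(&mt);
        gt_force_wake_get(&mt);         /* nested user: no extra write */
        gt_force_wake_put(&mt);
        gt_force_wake_put(&mt);
        return 0;
}
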
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 06a37f4fd74b..554bef7a3b9c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -107,6 +107,7 @@ struct opregion_header;
107struct opregion_acpi; 107struct opregion_acpi;
108struct opregion_swsci; 108struct opregion_swsci;
109struct opregion_asle; 109struct opregion_asle;
110struct drm_i915_private;
110 111
111struct intel_opregion { 112struct intel_opregion {
112 struct opregion_header *header; 113 struct opregion_header *header;
@@ -126,6 +127,9 @@ struct drm_i915_master_private {
126 struct _drm_i915_sarea *sarea_priv; 127 struct _drm_i915_sarea *sarea_priv;
127}; 128};
128#define I915_FENCE_REG_NONE -1 129#define I915_FENCE_REG_NONE -1
130#define I915_MAX_NUM_FENCES 16
131/* 16 fences + sign bit for FENCE_REG_NONE */
132#define I915_MAX_NUM_FENCE_BITS 5
129 133
130struct drm_i915_fence_reg { 134struct drm_i915_fence_reg {
131 struct list_head lru_list; 135 struct list_head lru_list;
@@ -168,7 +172,7 @@ struct drm_i915_error_state {
168 u32 instdone1; 172 u32 instdone1;
169 u32 seqno; 173 u32 seqno;
170 u64 bbaddr; 174 u64 bbaddr;
171 u64 fence[16]; 175 u64 fence[I915_MAX_NUM_FENCES];
172 struct timeval time; 176 struct timeval time;
173 struct drm_i915_error_object { 177 struct drm_i915_error_object {
174 int page_count; 178 int page_count;
@@ -182,7 +186,7 @@ struct drm_i915_error_state {
182 u32 gtt_offset; 186 u32 gtt_offset;
183 u32 read_domains; 187 u32 read_domains;
184 u32 write_domain; 188 u32 write_domain;
185 s32 fence_reg:5; 189 s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
186 s32 pinned:2; 190 s32 pinned:2;
187 u32 tiling:2; 191 u32 tiling:2;
188 u32 dirty:1; 192 u32 dirty:1;
@@ -218,6 +222,8 @@ struct drm_i915_display_funcs {
218 struct drm_i915_gem_object *obj); 222 struct drm_i915_gem_object *obj);
219 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, 223 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
220 int x, int y); 224 int x, int y);
225 void (*force_wake_get)(struct drm_i915_private *dev_priv);
226 void (*force_wake_put)(struct drm_i915_private *dev_priv);
221 /* clock updates for mode set */ 227 /* clock updates for mode set */
222 /* cursor updates */ 228 /* cursor updates */
223 /* render clock increase/decrease */ 229 /* render clock increase/decrease */
@@ -375,7 +381,7 @@ typedef struct drm_i915_private {
375 struct notifier_block lid_notifier; 381 struct notifier_block lid_notifier;
376 382
377 int crt_ddc_pin; 383 int crt_ddc_pin;
378 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ 384 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
379 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 385 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
380 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 386 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
381 387
@@ -506,7 +512,7 @@ typedef struct drm_i915_private {
506 u8 saveAR[21]; 512 u8 saveAR[21];
507 u8 saveDACMASK; 513 u8 saveDACMASK;
508 u8 saveCR[37]; 514 u8 saveCR[37];
509 uint64_t saveFENCE[16]; 515 uint64_t saveFENCE[I915_MAX_NUM_FENCES];
510 u32 saveCURACNTR; 516 u32 saveCURACNTR;
511 u32 saveCURAPOS; 517 u32 saveCURAPOS;
512 u32 saveCURABASE; 518 u32 saveCURABASE;
@@ -707,6 +713,7 @@ typedef struct drm_i915_private {
707 713
708 u64 last_count1; 714 u64 last_count1;
709 unsigned long last_time1; 715 unsigned long last_time1;
716 unsigned long chipset_power;
710 u64 last_count2; 717 u64 last_count2;
711 struct timespec last_time2; 718 struct timespec last_time2;
712 unsigned long gfx_power; 719 unsigned long gfx_power;
@@ -777,10 +784,8 @@ struct drm_i915_gem_object {
777 * Fence register bits (if any) for this object. Will be set 784 * Fence register bits (if any) for this object. Will be set
778 * as needed when mapped into the GTT. 785 * as needed when mapped into the GTT.
779 * Protected by dev->struct_mutex. 786 * Protected by dev->struct_mutex.
780 *
781 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
782 */ 787 */
783 signed int fence_reg:5; 788 signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
784 789
785 /** 790 /**
786 * Advice: are the backing pages purgeable? 791 * Advice: are the backing pages purgeable?
@@ -997,12 +1002,12 @@ extern int i915_max_ioctl;
997extern unsigned int i915_fbpercrtc __always_unused; 1002extern unsigned int i915_fbpercrtc __always_unused;
998extern int i915_panel_ignore_lid __read_mostly; 1003extern int i915_panel_ignore_lid __read_mostly;
999extern unsigned int i915_powersave __read_mostly; 1004extern unsigned int i915_powersave __read_mostly;
1000extern unsigned int i915_semaphores __read_mostly; 1005extern int i915_semaphores __read_mostly;
1001extern unsigned int i915_lvds_downclock __read_mostly; 1006extern unsigned int i915_lvds_downclock __read_mostly;
1002extern unsigned int i915_panel_use_ssc __read_mostly; 1007extern int i915_panel_use_ssc __read_mostly;
1003extern int i915_vbt_sdvo_panel_type __read_mostly; 1008extern int i915_vbt_sdvo_panel_type __read_mostly;
1004extern unsigned int i915_enable_rc6 __read_mostly; 1009extern int i915_enable_rc6 __read_mostly;
1005extern unsigned int i915_enable_fbc __read_mostly; 1010extern int i915_enable_fbc __read_mostly;
1006extern bool i915_enable_hangcheck __read_mostly; 1011extern bool i915_enable_hangcheck __read_mostly;
1007 1012
1008extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1013extern int i915_suspend(struct drm_device *dev, pm_message_t state);
@@ -1307,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
1307extern void intel_detect_pch(struct drm_device *dev); 1312extern void intel_detect_pch(struct drm_device *dev);
1308extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 1313extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
1309 1314
1315extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1316extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
1317extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1318extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
1319
1310/* overlay */ 1320/* overlay */
1311#ifdef CONFIG_DEBUG_FS 1321#ifdef CONFIG_DEBUG_FS
1312extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 1322extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1351,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1351/* We give fast paths for the really cool registers */ 1361/* We give fast paths for the really cool registers */
1352#define NEEDS_FORCE_WAKE(dev_priv, reg) \ 1362#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1353 (((dev_priv)->info->gen >= 6) && \ 1363 (((dev_priv)->info->gen >= 6) && \
1354 ((reg) < 0x40000) && \ 1364 ((reg) < 0x40000) && \
1355 ((reg) != FORCEWAKE)) 1365 ((reg) != FORCEWAKE) && \
1366 ((reg) != ECOBUS))
1356 1367
1357#define __i915_read(x, y) \ 1368#define __i915_read(x, y) \
1358 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); 1369 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
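
The header now names the fence count and sizes the fence_reg bitfield from it: sixteen fence registers plus the FENCE_REG_NONE sentinel (-1) need a 5-bit signed field, which spans -16..15. A small compilable check of that sizing (the struct below is illustrative, not the driver's):

#include <assert.h>
#include <stdio.h>

#define MAX_NUM_FENCES          16
#define MAX_NUM_FENCE_BITS      5       /* 16 indices + sign bit for -1 */
#define FENCE_REG_NONE          (-1)

struct gem_object_bits {
        signed int fence_reg : MAX_NUM_FENCE_BITS;
};

int main(void)
{
        struct gem_object_bits obj;

        obj.fence_reg = FENCE_REG_NONE;
        assert(obj.fence_reg == -1);            /* sentinel round-trips */

        obj.fence_reg = MAX_NUM_FENCES - 1;
        assert(obj.fence_reg == 15);            /* highest index round-trips */

        printf("fence_reg holds %d..%d\n",
               -(1 << (MAX_NUM_FENCE_BITS - 1)),
               (1 << (MAX_NUM_FENCE_BITS - 1)) - 1);
        return 0;
}
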
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6651c36b6e8a..8359dc777041 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1396,7 +1396,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
1396 1396
1397 if (obj->base.size > dev_priv->mm.gtt_mappable_end) { 1397 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1398 ret = -E2BIG; 1398 ret = -E2BIG;
1399 goto unlock; 1399 goto out;
1400 } 1400 }
1401 1401
1402 if (obj->madv != I915_MADV_WILLNEED) { 1402 if (obj->madv != I915_MADV_WILLNEED) {
@@ -1745,7 +1745,7 @@ static void i915_gem_reset_fences(struct drm_device *dev)
1745 struct drm_i915_private *dev_priv = dev->dev_private; 1745 struct drm_i915_private *dev_priv = dev->dev_private;
1746 int i; 1746 int i;
1747 1747
1748 for (i = 0; i < 16; i++) { 1748 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1749 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 1749 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1750 struct drm_i915_gem_object *obj = reg->obj; 1750 struct drm_i915_gem_object *obj = reg->obj;
1751 1751
@@ -3512,9 +3512,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3512 * so emit a request to do so. 3512 * so emit a request to do so.
3513 */ 3513 */
3514 request = kzalloc(sizeof(*request), GFP_KERNEL); 3514 request = kzalloc(sizeof(*request), GFP_KERNEL);
3515 if (request) 3515 if (request) {
3516 ret = i915_add_request(obj->ring, NULL, request); 3516 ret = i915_add_request(obj->ring, NULL, request);
3517 else 3517 if (ret)
3518 kfree(request);
3519 } else
3518 ret = -ENOMEM; 3520 ret = -ENOMEM;
3519 } 3521 }
3520 3522
@@ -3613,7 +3615,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3613 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3615 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3614 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3616 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3615 3617
3616 if (IS_GEN6(dev)) { 3618 if (IS_GEN6(dev) || IS_GEN7(dev)) {
3617 /* On Gen6, we can have the GPU use the LLC (the CPU 3619 /* On Gen6, we can have the GPU use the LLC (the CPU
3618 * cache) for about a 10% performance improvement 3620 * cache) for about a 10% performance improvement
3619 * compared to uncached. Graphics requests other than 3621 * compared to uncached. Graphics requests other than
@@ -3877,7 +3879,7 @@ i915_gem_load(struct drm_device *dev)
3877 INIT_LIST_HEAD(&dev_priv->mm.gtt_list); 3879 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3878 for (i = 0; i < I915_NUM_RINGS; i++) 3880 for (i = 0; i < I915_NUM_RINGS; i++)
3879 init_ring_lists(&dev_priv->ring[i]); 3881 init_ring_lists(&dev_priv->ring[i]);
3880 for (i = 0; i < 16; i++) 3882 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3881 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 3883 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3882 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 3884 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3883 i915_gem_retire_work_handler); 3885 i915_gem_retire_work_handler);
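
The busy-ioctl hunk above fixes a leak: if i915_add_request() fails, the caller still owns the freshly allocated request and must kfree() it. A standalone sketch of that ownership rule; submit_request() is a stand-in, and on success ownership passes to the ring (not modelled here):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct request { int seqno; };

static int submit_request(struct request *rq, int force_fail)
{
        if (force_fail)
                return -EIO;            /* caller keeps ownership */
        rq->seqno = 42;
        return 0;
}

static int flush_ring(int force_fail)
{
        struct request *rq = calloc(1, sizeof(*rq));
        int ret;

        if (!rq)
                return -ENOMEM;

        ret = submit_request(rq, force_fail);
        if (ret)
                free(rq);               /* the fix: don't leak on failure */

        return ret;
}

int main(void)
{
        printf("ok path:   %d\n", flush_ring(0));
        printf("fail path: %d\n", flush_ring(1));
        return 0;
}
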
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3693e83a97f3..c681dc149d2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
32#include "i915_drv.h" 32#include "i915_drv.h"
33#include "i915_trace.h" 33#include "i915_trace.h"
34#include "intel_drv.h" 34#include "intel_drv.h"
35#include <linux/dma_remapping.h>
35 36
36struct change_domains { 37struct change_domains {
37 uint32_t invalidate_domains; 38 uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
746 return 0; 747 return 0;
747} 748}
748 749
750static bool
751intel_enable_semaphores(struct drm_device *dev)
752{
753 if (INTEL_INFO(dev)->gen < 6)
754 return 0;
755
756 if (i915_semaphores >= 0)
757 return i915_semaphores;
758
759 /* Enable semaphores on SNB when IO remapping is off */
760 if (INTEL_INFO(dev)->gen == 6)
761 return !intel_iommu_enabled;
762
763 return 1;
764}
765
749static int 766static int
750i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, 767i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
751 struct intel_ring_buffer *to) 768 struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
758 return 0; 775 return 0;
759 776
760 /* XXX gpu semaphores are implicated in various hard hangs on SNB */ 777 /* XXX gpu semaphores are implicated in various hard hangs on SNB */
761 if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores) 778 if (!intel_enable_semaphores(obj->base.dev))
762 return i915_gem_object_wait_rendering(obj); 779 return i915_gem_object_wait_rendering(obj);
763 780
764 idx = intel_ring_sync_index(from, to); 781 idx = intel_ring_sync_index(from, to);
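
i915_semaphores becomes a tri-state (-1 selects a per-chip default) and intel_enable_semaphores() resolves it: an explicit value wins, otherwise Sandybridge enables semaphores only when IOMMU remapping is off. A standalone sketch of that resolution logic with plain ints and bools:

#include <stdbool.h>
#include <stdio.h>

static bool enable_semaphores(int param, int gen, bool iommu_enabled)
{
        if (gen < 6)
                return false;           /* hardware has no semaphores */

        if (param >= 0)
                return param;           /* explicit user override */

        if (gen == 6)
                return !iommu_enabled;  /* SNB default depends on IOMMU */

        return true;
}

int main(void)
{
        printf("%d\n", enable_semaphores(-1, 6, true));  /* 0: SNB default, IOMMU on */
        printf("%d\n", enable_semaphores(1, 6, true));   /* 1: forced on */
        printf("%d\n", enable_semaphores(-1, 7, false)); /* 1: default for later gens */
        return 0;
}
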
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 96643ee240da..3700df47ad93 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -824,6 +824,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
824 824
825 /* Fences */ 825 /* Fences */
826 switch (INTEL_INFO(dev)->gen) { 826 switch (INTEL_INFO(dev)->gen) {
827 case 7:
827 case 6: 828 case 6:
828 for (i = 0; i < 16; i++) 829 for (i = 0; i < 16; i++)
829 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 830 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
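
Here gen 7 is added as a fall-through into the existing gen 6 branch, since Ivybridge keeps the Sandybridge fence-register layout; the same pattern appears in i915_suspend.c below. A compilable sketch of the fall-through, with the register read stubbed:

#include <stdio.h>

#define NUM_FENCES 16

static unsigned long long read_fence_stub(int i)
{
        return 0x1000ull * i;
}

static void save_fences(int gen, unsigned long long *saved)
{
        int i;

        switch (gen) {
        case 7:                         /* fall through: same layout as gen 6 */
        case 6:
                for (i = 0; i < NUM_FENCES; i++)
                        saved[i] = read_fence_stub(i);
                break;
        default:
                break;
        }
}

int main(void)
{
        unsigned long long saved[NUM_FENCES] = { 0 };

        save_fences(7, saved);
        printf("fence[15] = 0x%llx\n", saved[15]);
        return 0;
}
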
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 517bf0cda3e5..853f2f0acaa2 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1553,12 +1553,21 @@
1553 */ 1553 */
1554#define PP_READY (1 << 30) 1554#define PP_READY (1 << 30)
1555#define PP_SEQUENCE_NONE (0 << 28) 1555#define PP_SEQUENCE_NONE (0 << 28)
1556#define PP_SEQUENCE_ON (1 << 28) 1556#define PP_SEQUENCE_POWER_UP (1 << 28)
1557#define PP_SEQUENCE_OFF (2 << 28) 1557#define PP_SEQUENCE_POWER_DOWN (2 << 28)
1558#define PP_SEQUENCE_MASK 0x30000000 1558#define PP_SEQUENCE_MASK (3 << 28)
1559#define PP_SEQUENCE_SHIFT 28
1559#define PP_CYCLE_DELAY_ACTIVE (1 << 27) 1560#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
1560#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
1561#define PP_SEQUENCE_STATE_MASK 0x0000000f 1561#define PP_SEQUENCE_STATE_MASK 0x0000000f
1562#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
1563#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
1564#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
1565#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
1566#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
1567#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
1568#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
1569#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
1570#define PP_SEQUENCE_STATE_RESET (0xf << 0)
1562#define PP_CONTROL 0x61204 1571#define PP_CONTROL 0x61204
1563#define POWER_TARGET_ON (1 << 0) 1572#define POWER_TARGET_ON (1 << 0)
1564#define PP_ON_DELAYS 0x61208 1573#define PP_ON_DELAYS 0x61208
@@ -3295,10 +3304,10 @@
3295/* or SDVOB */ 3304/* or SDVOB */
3296#define HDMIB 0xe1140 3305#define HDMIB 0xe1140
3297#define PORT_ENABLE (1 << 31) 3306#define PORT_ENABLE (1 << 31)
3298#define TRANSCODER_A (0) 3307#define TRANSCODER(pipe) ((pipe) << 30)
3299#define TRANSCODER_B (1 << 30) 3308#define TRANSCODER_CPT(pipe) ((pipe) << 29)
3300#define TRANSCODER(pipe) ((pipe) << 30) 3309#define TRANSCODER_MASK (1 << 30)
3301#define TRANSCODER_MASK (1 << 30) 3310#define TRANSCODER_MASK_CPT (3 << 29)
3302#define COLOR_FORMAT_8bpc (0) 3311#define COLOR_FORMAT_8bpc (0)
3303#define COLOR_FORMAT_12bpc (3 << 26) 3312#define COLOR_FORMAT_12bpc (3 << 26)
3304#define SDVOB_HOTPLUG_ENABLE (1 << 23) 3313#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3439,12 +3448,38 @@
3439#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22) 3448#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
3440#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) 3449#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
3441 3450
3451/* IVB */
3452#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
3453#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
3454#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
3455#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
3456#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
3457#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
3458#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
3459
3460/* legacy values */
3461#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
3462#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
3463#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
3464#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
3465#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
3466
3467#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
3468
3442#define FORCEWAKE 0xA18C 3469#define FORCEWAKE 0xA18C
3443#define FORCEWAKE_ACK 0x130090 3470#define FORCEWAKE_ACK 0x130090
3471#define FORCEWAKE_MT 0xa188 /* multi-threaded */
3472#define FORCEWAKE_MT_ACK 0x130040
3473#define ECOBUS 0xa180
3474#define FORCEWAKE_MT_ENABLE (1<<5)
3444 3475
3445#define GT_FIFO_FREE_ENTRIES 0x120008 3476#define GT_FIFO_FREE_ENTRIES 0x120008
3446#define GT_FIFO_NUM_RESERVED_ENTRIES 20 3477#define GT_FIFO_NUM_RESERVED_ENTRIES 20
3447 3478
3479#define GEN6_UCGCTL2 0x9404
3480# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
3481# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
3482
3448#define GEN6_RPNSWREQ 0xA008 3483#define GEN6_RPNSWREQ 0xA008
3449#define GEN6_TURBO_DISABLE (1<<31) 3484#define GEN6_TURBO_DISABLE (1<<31)
3450#define GEN6_FREQUENCY(x) ((x)<<25) 3485#define GEN6_FREQUENCY(x) ((x)<<25)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index f8f602d76650..7886e4fb60e3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
370 370
371 /* Fences */ 371 /* Fences */
372 switch (INTEL_INFO(dev)->gen) { 372 switch (INTEL_INFO(dev)->gen) {
373 case 7:
373 case 6: 374 case 6:
374 for (i = 0; i < 16; i++) 375 for (i = 0; i < 16; i++)
375 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 376 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
@@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
404 405
405 /* Fences */ 406 /* Fences */
406 switch (INTEL_INFO(dev)->gen) { 407 switch (INTEL_INFO(dev)->gen) {
408 case 7:
407 case 6: 409 case 6:
408 for (i = 0; i < 16; i++) 410 for (i = 0; i < 16; i++)
409 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); 411 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8ecbc2f11633..5a3e7853003f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -38,8 +38,8 @@
38#include "i915_drv.h" 38#include "i915_drv.h"
39#include "i915_trace.h" 39#include "i915_trace.h"
40#include "drm_dp_helper.h" 40#include "drm_dp_helper.h"
41
42#include "drm_crtc_helper.h" 41#include "drm_crtc_helper.h"
42#include <linux/dma_remapping.h>
43 43
44#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) 44#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
45 45
@@ -2933,7 +2933,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2933 2933
2934 /* For PCH DP, enable TRANS_DP_CTL */ 2934 /* For PCH DP, enable TRANS_DP_CTL */
2935 if (HAS_PCH_CPT(dev) && 2935 if (HAS_PCH_CPT(dev) &&
2936 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 2936 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
2937 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2937 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; 2938 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2938 reg = TRANS_DP_CTL(pipe); 2939 reg = TRANS_DP_CTL(pipe);
2939 temp = I915_READ(reg); 2940 temp = I915_READ(reg);
@@ -4669,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4669/** 4670/**
4670 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send 4671 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4671 * @crtc: CRTC structure 4672 * @crtc: CRTC structure
4673 * @mode: requested mode
4672 * 4674 *
4673 * A pipe may be connected to one or more outputs. Based on the depth of the 4675 * A pipe may be connected to one or more outputs. Based on the depth of the
4674 * attached framebuffer, choose a good color depth to use on the pipe. 4676 * attached framebuffer, choose a good color depth to use on the pipe.
@@ -4680,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4680 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc 4682 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4681 * Displays may support a restricted set as well, check EDID and clamp as 4683 * Displays may support a restricted set as well, check EDID and clamp as
4682 * appropriate. 4684 * appropriate.
4685 * DP may want to dither down to 6bpc to fit larger modes
4683 * 4686 *
4684 * RETURNS: 4687 * RETURNS:
4685 * Dithering requirement (i.e. false if display bpc and pipe bpc match, 4688 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4686 * true if they don't match). 4689 * true if they don't match).
4687 */ 4690 */
4688static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, 4691static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4689 unsigned int *pipe_bpp) 4692 unsigned int *pipe_bpp,
4693 struct drm_display_mode *mode)
4690{ 4694{
4691 struct drm_device *dev = crtc->dev; 4695 struct drm_device *dev = crtc->dev;
4692 struct drm_i915_private *dev_priv = dev->dev_private; 4696 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4711,7 +4715,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4711 lvds_bpc = 6; 4715 lvds_bpc = 6;
4712 4716
4713 if (lvds_bpc < display_bpc) { 4717 if (lvds_bpc < display_bpc) {
4714 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); 4718 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4715 display_bpc = lvds_bpc; 4719 display_bpc = lvds_bpc;
4716 } 4720 }
4717 continue; 4721 continue;
@@ -4722,7 +4726,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4722 unsigned int edp_bpc = dev_priv->edp.bpp / 3; 4726 unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4723 4727
4724 if (edp_bpc < display_bpc) { 4728 if (edp_bpc < display_bpc) {
4725 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); 4729 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4726 display_bpc = edp_bpc; 4730 display_bpc = edp_bpc;
4727 } 4731 }
4728 continue; 4732 continue;
@@ -4737,7 +4741,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4737 /* Don't use an invalid EDID bpc value */ 4741 /* Don't use an invalid EDID bpc value */
4738 if (connector->display_info.bpc && 4742 if (connector->display_info.bpc &&
4739 connector->display_info.bpc < display_bpc) { 4743 connector->display_info.bpc < display_bpc) {
4740 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); 4744 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4741 display_bpc = connector->display_info.bpc; 4745 display_bpc = connector->display_info.bpc;
4742 } 4746 }
4743 } 4747 }
@@ -4748,15 +4752,20 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4748 */ 4752 */
4749 if (intel_encoder->type == INTEL_OUTPUT_HDMI) { 4753 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4750 if (display_bpc > 8 && display_bpc < 12) { 4754 if (display_bpc > 8 && display_bpc < 12) {
4751 DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n"); 4755 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
4752 display_bpc = 12; 4756 display_bpc = 12;
4753 } else { 4757 } else {
4754 DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n"); 4758 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
4755 display_bpc = 8; 4759 display_bpc = 8;
4756 } 4760 }
4757 } 4761 }
4758 } 4762 }
4759 4763
4764 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4765 DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
4766 display_bpc = 6;
4767 }
4768
4760 /* 4769 /*
4761 * We could just drive the pipe at the highest bpc all the time and 4770 * We could just drive the pipe at the highest bpc all the time and
4762 * enable dithering as needed, but that costs bandwidth. So choose 4771 * enable dithering as needed, but that costs bandwidth. So choose
@@ -4789,8 +4798,8 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4789 4798
4790 display_bpc = min(display_bpc, bpc); 4799 display_bpc = min(display_bpc, bpc);
4791 4800
4792 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", 4801 DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
4793 bpc, display_bpc); 4802 bpc, display_bpc);
4794 4803
4795 *pipe_bpp = display_bpc * 3; 4804 *pipe_bpp = display_bpc * 3;
4796 4805
@@ -5018,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5018 pipeconf &= ~PIPECONF_DOUBLE_WIDE; 5027 pipeconf &= ~PIPECONF_DOUBLE_WIDE;
5019 } 5028 }
5020 5029
5030 /* default to 8bpc */
5031 pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
5032 if (is_dp) {
5033 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5034 pipeconf |= PIPECONF_BPP_6 |
5035 PIPECONF_DITHER_EN |
5036 PIPECONF_DITHER_TYPE_SP;
5037 }
5038 }
5039
5021 dpll |= DPLL_VCO_ENABLE; 5040 dpll |= DPLL_VCO_ENABLE;
5022 5041
5023 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 5042 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5479,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5479 /* determine panel color depth */ 5498 /* determine panel color depth */
5480 temp = I915_READ(PIPECONF(pipe)); 5499 temp = I915_READ(PIPECONF(pipe));
5481 temp &= ~PIPE_BPC_MASK; 5500 temp &= ~PIPE_BPC_MASK;
5482 dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp); 5501 dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
5483 switch (pipe_bpp) { 5502 switch (pipe_bpp) {
5484 case 18: 5503 case 18:
5485 temp |= PIPE_6BPC; 5504 temp |= PIPE_6BPC;
@@ -5671,7 +5690,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5671 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; 5690 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5672 if ((is_lvds && dev_priv->lvds_dither) || dither) { 5691 if ((is_lvds && dev_priv->lvds_dither) || dither) {
5673 pipeconf |= PIPECONF_DITHER_EN; 5692 pipeconf |= PIPECONF_DITHER_EN;
5674 pipeconf |= PIPECONF_DITHER_TYPE_ST1; 5693 pipeconf |= PIPECONF_DITHER_TYPE_SP;
5675 } 5694 }
5676 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5695 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5677 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5696 intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -7188,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7188 work->old_fb_obj = intel_fb->obj; 7207 work->old_fb_obj = intel_fb->obj;
7189 INIT_WORK(&work->work, intel_unpin_work_fn); 7208 INIT_WORK(&work->work, intel_unpin_work_fn);
7190 7209
7210 ret = drm_vblank_get(dev, intel_crtc->pipe);
7211 if (ret)
7212 goto free_work;
7213
7191 /* We borrow the event spin lock for protecting unpin_work */ 7214 /* We borrow the event spin lock for protecting unpin_work */
7192 spin_lock_irqsave(&dev->event_lock, flags); 7215 spin_lock_irqsave(&dev->event_lock, flags);
7193 if (intel_crtc->unpin_work) { 7216 if (intel_crtc->unpin_work) {
7194 spin_unlock_irqrestore(&dev->event_lock, flags); 7217 spin_unlock_irqrestore(&dev->event_lock, flags);
7195 kfree(work); 7218 kfree(work);
7219 drm_vblank_put(dev, intel_crtc->pipe);
7196 7220
7197 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 7221 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
7198 return -EBUSY; 7222 return -EBUSY;
@@ -7211,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7211 7235
7212 crtc->fb = fb; 7236 crtc->fb = fb;
7213 7237
7214 ret = drm_vblank_get(dev, intel_crtc->pipe);
7215 if (ret)
7216 goto cleanup_objs;
7217
7218 work->pending_flip_obj = obj; 7238 work->pending_flip_obj = obj;
7219 7239
7220 work->enable_stall_check = true; 7240 work->enable_stall_check = true;
@@ -7237,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7237 7257
7238cleanup_pending: 7258cleanup_pending:
7239 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 7259 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
7240cleanup_objs:
7241 drm_gem_object_unreference(&work->old_fb_obj->base); 7260 drm_gem_object_unreference(&work->old_fb_obj->base);
7242 drm_gem_object_unreference(&obj->base); 7261 drm_gem_object_unreference(&obj->base);
7243 mutex_unlock(&dev->struct_mutex); 7262 mutex_unlock(&dev->struct_mutex);
@@ -7246,6 +7265,8 @@ cleanup_objs:
7246 intel_crtc->unpin_work = NULL; 7265 intel_crtc->unpin_work = NULL;
7247 spin_unlock_irqrestore(&dev->event_lock, flags); 7266 spin_unlock_irqrestore(&dev->event_lock, flags);
7248 7267
7268 drm_vblank_put(dev, intel_crtc->pipe);
7269free_work:
7249 kfree(work); 7270 kfree(work);
7250 7271
7251 return ret; 7272 return ret;
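
The page-flip hunks above take the vblank reference before checking for a pending flip and drop it on every failure path, including the new free_work label. A simplified standalone sketch of that acquire-early/unwind-late ordering; vblank_get/put stand in for drm_vblank_get()/drm_vblank_put(), and the real driver keeps the reference until the flip completes rather than releasing it inline:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int vblank_refs;

static int vblank_get(void)  { vblank_refs++; return 0; }
static void vblank_put(void) { vblank_refs--; }

static int page_flip(int crtc_busy)
{
        void *work = malloc(64);
        int ret;

        if (!work)
                return -ENOMEM;

        ret = vblank_get();
        if (ret)
                goto free_work;

        if (crtc_busy) {                /* previous flip still pending */
                ret = -EBUSY;
                goto put_vblank;
        }

        /* ...queue the flip; simplified: release everything right away */
        vblank_put();
        free(work);
        return 0;

put_vblank:
        vblank_put();
free_work:
        free(work);
        return ret;
}

int main(void)
{
        int ok = page_flip(0);
        int busy = page_flip(1);

        printf("flip: %d, busy: %d, refs left: %d\n", ok, busy, vblank_refs);
        return 0;
}
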
@@ -7891,6 +7912,33 @@ void intel_init_emon(struct drm_device *dev)
7891 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); 7912 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
7892} 7913}
7893 7914
7915static bool intel_enable_rc6(struct drm_device *dev)
7916{
7917 /*
7918 * Respect the kernel parameter if it is set
7919 */
7920 if (i915_enable_rc6 >= 0)
7921 return i915_enable_rc6;
7922
7923 /*
7924 * Disable RC6 on Ironlake
7925 */
7926 if (INTEL_INFO(dev)->gen == 5)
7927 return 0;
7928
7929 /*
7930 * Enable rc6 on Sandybridge if DMA remapping is disabled
7931 */
7932 if (INTEL_INFO(dev)->gen == 6) {
7933 DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
7934 intel_iommu_enabled ? "true" : "false",
7935 !intel_iommu_enabled ? "en" : "dis");
7936 return !intel_iommu_enabled;
7937 }
7938 DRM_DEBUG_DRIVER("RC6 enabled\n");
7939 return 1;
7940}
7941
7894void gen6_enable_rps(struct drm_i915_private *dev_priv) 7942void gen6_enable_rps(struct drm_i915_private *dev_priv)
7895{ 7943{
7896 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 7944 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
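
intel_enable_rc6() above folds the old direct checks of i915_enable_rc6 into one place: an explicit 0 or 1 from the module parameter wins, -1 means "auto", and the auto policy is per generation (off on Ironlake, on Sandybridge only when DMA remapping is disabled). A tiny sketch of that tri-state-parameter pattern, with made-up names; it mirrors the decision order of the new helper without claiming to be it:

#include <stdio.h>

/* -1 = auto (driver decides), 0 = force off, 1 = force on */
static int param_enable_feature = -1;

struct fake_dev { int gen; int iommu_enabled; };

static int feature_enabled(const struct fake_dev *dev)
{
	if (param_enable_feature >= 0)     /* explicit user choice always wins */
		return param_enable_feature;

	if (dev->gen == 5)                 /* known-problematic generation: auto = off */
		return 0;
	if (dev->gen == 6)                 /* conditional on another subsystem's state */
		return !dev->iommu_enabled;
	return 1;                          /* default: on */
}

int main(void)
{
	struct fake_dev snb = { .gen = 6, .iommu_enabled = 1 };
	printf("auto decision for gen6 with IOMMU enabled: %d\n", feature_enabled(&snb));
	return 0;
}
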
@@ -7927,7 +7975,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
7927 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); 7975 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
7928 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 7976 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
7929 7977
7930 if (i915_enable_rc6) 7978 if (intel_enable_rc6(dev_priv->dev))
7931 rc6_mask = GEN6_RC_CTL_RC6p_ENABLE | 7979 rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
7932 GEN6_RC_CTL_RC6_ENABLE; 7980 GEN6_RC_CTL_RC6_ENABLE;
7933 7981
@@ -8153,6 +8201,20 @@ static void gen6_init_clock_gating(struct drm_device *dev)
8153 I915_WRITE(WM2_LP_ILK, 0); 8201 I915_WRITE(WM2_LP_ILK, 0);
8154 I915_WRITE(WM1_LP_ILK, 0); 8202 I915_WRITE(WM1_LP_ILK, 0);
8155 8203
8204 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
8205 * gating disable must be set. Failure to set it results in
8206 * flickering pixels due to Z write ordering failures after
8207 * some amount of runtime in the Mesa "fire" demo, and Unigine
8208 * Sanctuary and Tropics, and apparently anything else with
8209 * alpha test or pixel discard.
8210 *
8211 * According to the spec, bit 11 (RCCUNIT) must also be set,
8212 * but we didn't debug actual testcases to find it out.
8213 */
8214 I915_WRITE(GEN6_UCGCTL2,
8215 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
8216 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
8217
8156 /* 8218 /*
8157 * According to the spec the following bits should be 8219 * According to the spec the following bits should be
8158 * set in order to enable memory self-refresh and fbc: 8220 * set in order to enable memory self-refresh and fbc:
@@ -8362,7 +8424,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
8362 /* rc6 disabled by default due to repeated reports of hanging during 8424 /* rc6 disabled by default due to repeated reports of hanging during
8363 * boot and resume. 8425 * boot and resume.
8364 */ 8426 */
8365 if (!i915_enable_rc6) 8427 if (!intel_enable_rc6(dev))
8366 return; 8428 return;
8367 8429
8368 mutex_lock(&dev->struct_mutex); 8430 mutex_lock(&dev->struct_mutex);
@@ -8481,6 +8543,28 @@ static void intel_init_display(struct drm_device *dev)
8481 8543
8482 /* For FIFO watermark updates */ 8544 /* For FIFO watermark updates */
8483 if (HAS_PCH_SPLIT(dev)) { 8545 if (HAS_PCH_SPLIT(dev)) {
8546 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
8547 dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
8548
8549 /* IVB configs may use multi-threaded forcewake */
8550 if (IS_IVYBRIDGE(dev)) {
8551 u32 ecobus;
8552
8553 mutex_lock(&dev->struct_mutex);
8554 __gen6_gt_force_wake_mt_get(dev_priv);
8555 ecobus = I915_READ(ECOBUS);
8556 __gen6_gt_force_wake_mt_put(dev_priv);
8557 mutex_unlock(&dev->struct_mutex);
8558
8559 if (ecobus & FORCEWAKE_MT_ENABLE) {
8560 DRM_DEBUG_KMS("Using MT version of forcewake\n");
8561 dev_priv->display.force_wake_get =
8562 __gen6_gt_force_wake_mt_get;
8563 dev_priv->display.force_wake_put =
8564 __gen6_gt_force_wake_mt_put;
8565 }
8566 }
8567
8484 if (HAS_PCH_IBX(dev)) 8568 if (HAS_PCH_IBX(dev))
8485 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; 8569 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
8486 else if (HAS_PCH_CPT(dev)) 8570 else if (HAS_PCH_CPT(dev))
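
The Ivybridge block above probes at init time whether the hardware honours the multi-threaded forcewake protocol (by toggling it once and checking ECOBUS for FORCEWAKE_MT_ENABLE) and only then swaps the force_wake_get/put function pointers. A compact sketch of that probe-then-bind idea; the structures and names below are illustrative stand-ins, not the i915 types:

#include <stdbool.h>
#include <stdio.h>

struct gpu;
struct gpu_ops {
	void (*force_wake_get)(struct gpu *);
	void (*force_wake_put)(struct gpu *);
};
struct gpu {
	bool mt_sticks;          /* pretend result of the ECOBUS check */
	struct gpu_ops ops;
};

static void legacy_get(struct gpu *g) { (void)g; puts("legacy forcewake get"); }
static void legacy_put(struct gpu *g) { (void)g; puts("legacy forcewake put"); }
static void mt_get(struct gpu *g)     { (void)g; puts("MT forcewake get"); }
static void mt_put(struct gpu *g)     { (void)g; puts("MT forcewake put"); }

/* Bind the default implementation, then upgrade only if the one-shot probe
 * shows the multi-threaded variant actually takes effect. */
static void bind_forcewake(struct gpu *g)
{
	g->ops.force_wake_get = legacy_get;
	g->ops.force_wake_put = legacy_put;

	if (g->mt_sticks) {      /* in the driver: one MT get/put, then read ECOBUS */
		g->ops.force_wake_get = mt_get;
		g->ops.force_wake_put = mt_put;
	}
}

int main(void)
{
	struct gpu g = { .mt_sticks = true };
	bind_forcewake(&g);
	g.ops.force_wake_get(&g);
	g.ops.force_wake_put(&g);
	return 0;
}
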
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09b318b0227f..92b041b66e49 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -59,7 +59,6 @@ struct intel_dp {
59 struct i2c_algo_dp_aux_data algo; 59 struct i2c_algo_dp_aux_data algo;
60 bool is_pch_edp; 60 bool is_pch_edp;
61 uint8_t train_set[4]; 61 uint8_t train_set[4];
62 uint8_t link_status[DP_LINK_STATUS_SIZE];
63 int panel_power_up_delay; 62 int panel_power_up_delay;
64 int panel_power_down_delay; 63 int panel_power_down_delay;
65 int panel_power_cycle_delay; 64 int panel_power_cycle_delay;
@@ -68,7 +67,6 @@ struct intel_dp {
68 struct drm_display_mode *panel_fixed_mode; /* for eDP */ 67 struct drm_display_mode *panel_fixed_mode; /* for eDP */
69 struct delayed_work panel_vdd_work; 68 struct delayed_work panel_vdd_work;
70 bool want_panel_vdd; 69 bool want_panel_vdd;
71 unsigned long panel_off_jiffies;
72}; 70};
73 71
74/** 72/**
@@ -157,16 +155,12 @@ intel_edp_link_config(struct intel_encoder *intel_encoder,
157static int 155static int
158intel_dp_max_lane_count(struct intel_dp *intel_dp) 156intel_dp_max_lane_count(struct intel_dp *intel_dp)
159{ 157{
160 int max_lane_count = 4; 158 int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
161 159 switch (max_lane_count) {
162 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 160 case 1: case 2: case 4:
163 max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; 161 break;
164 switch (max_lane_count) { 162 default:
165 case 1: case 2: case 4: 163 max_lane_count = 4;
166 break;
167 default:
168 max_lane_count = 4;
169 }
170 } 164 }
171 return max_lane_count; 165 return max_lane_count;
172} 166}
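
The simplified intel_dp_max_lane_count() now trusts the cached DPCD byte unconditionally and only sanitises it to a legal value. A small sketch of that clamp, assuming the same 0x1f mask and the DP-legal lane counts of 1, 2 and 4:

#include <stdint.h>
#include <stdio.h>

/* Sanitise a raw DPCD MAX_LANE_COUNT byte: the low 5 bits hold the count,
 * anything other than 1/2/4 falls back to 4. */
static int dp_max_lane_count(uint8_t dpcd_max_lane_count)
{
	int lanes = dpcd_max_lane_count & 0x1f;

	switch (lanes) {
	case 1:
	case 2:
	case 4:
		return lanes;
	default:
		return 4;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       dp_max_lane_count(0x84),   /* enhanced-framing bit set, 4 lanes */
	       dp_max_lane_count(0x02),
	       dp_max_lane_count(0x00));  /* bogus value -> clamp to 4 */
	return 0;
}
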
@@ -214,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
214 */ 208 */
215 209
216static int 210static int
217intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock) 211intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
218{ 212{
219 struct drm_crtc *crtc = intel_dp->base.base.crtc; 213 struct drm_crtc *crtc = intel_dp->base.base.crtc;
220 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 214 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
221 int bpp = 24; 215 int bpp = 24;
222 216
223 if (intel_crtc) 217 if (check_bpp)
218 bpp = check_bpp;
219 else if (intel_crtc)
224 bpp = intel_crtc->bpp; 220 bpp = intel_crtc->bpp;
225 221
226 return (pixel_clock * bpp + 9) / 10; 222 return (pixel_clock * bpp + 9) / 10;
@@ -239,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
239 struct intel_dp *intel_dp = intel_attached_dp(connector); 235 struct intel_dp *intel_dp = intel_attached_dp(connector);
240 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); 236 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
241 int max_lanes = intel_dp_max_lane_count(intel_dp); 237 int max_lanes = intel_dp_max_lane_count(intel_dp);
238 int max_rate, mode_rate;
242 239
243 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { 240 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
244 if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) 241 if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -248,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
248 return MODE_PANEL; 245 return MODE_PANEL;
249 } 246 }
250 247
251 if (intel_dp_link_required(intel_dp, mode->clock) 248 mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
252 > intel_dp_max_data_rate(max_link_clock, max_lanes)) 249 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
253 return MODE_CLOCK_HIGH; 250
251 if (mode_rate > max_rate) {
252 mode_rate = intel_dp_link_required(intel_dp,
253 mode->clock, 18);
254 if (mode_rate > max_rate)
255 return MODE_CLOCK_HIGH;
256 else
257 mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
258 }
254 259
255 if (mode->clock < 10000) 260 if (mode->clock < 10000)
256 return MODE_CLOCK_LOW; 261 return MODE_CLOCK_LOW;
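
The mode_valid change retries the bandwidth check at 18 bpp (6 bits per colour) before rejecting a mode, and tags the mode so mode_fixup later forces 6bpc. A rough user-space sketch of that decision: the required-rate arithmetic is the same (pixel_clock * bpp + 9) / 10 as in the hunk, while the link-capacity helper is an illustrative stand-in for intel_dp_max_data_rate assuming 8b/10b coding; the flag mirrors INTEL_MODE_DP_FORCE_6BPC but everything else is invented:

#include <stdbool.h>
#include <stdio.h>

#define MODE_OK          0
#define MODE_CLOCK_HIGH  1

static int link_required(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 9) / 10;   /* same rounding as the driver */
}

static int max_data_rate(int link_clock_khz, int lanes)
{
	return (link_clock_khz * lanes * 8) / 10;  /* 8 data bits per 10-bit symbol */
}

/* Accept at 24 bpp if it fits, otherwise retry at 18 bpp and remember that
 * the pipe must be dropped to 6 bits per colour. */
static int dp_mode_valid(int pixel_clock_khz, int link_clock_khz, int lanes,
			 bool *force_6bpc)
{
	int max_rate = max_data_rate(link_clock_khz, lanes);

	*force_6bpc = false;
	if (link_required(pixel_clock_khz, 24) <= max_rate)
		return MODE_OK;
	if (link_required(pixel_clock_khz, 18) <= max_rate) {
		*force_6bpc = true;                /* INTEL_MODE_DP_FORCE_6BPC */
		return MODE_OK;
	}
	return MODE_CLOCK_HIGH;
}

int main(void)
{
	bool six_bpc;
	/* a ~200 MHz pixel clock over 2 lanes of HBR (270 MHz symbol clock) */
	int status = dp_mode_valid(200000, 270000, 2, &six_bpc);
	printf("status=%d force_6bpc=%d\n", status, six_bpc);
	return 0;
}
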
@@ -368,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
368 * clock divider. 373 * clock divider.
369 */ 374 */
370 if (is_cpu_edp(intel_dp)) { 375 if (is_cpu_edp(intel_dp)) {
371 if (IS_GEN6(dev)) 376 if (IS_GEN6(dev) || IS_GEN7(dev))
372 aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ 377 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
373 else 378 else
374 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 379 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
375 } else if (HAS_PCH_SPLIT(dev)) 380 } else if (HAS_PCH_SPLIT(dev))
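
The AUX channel wants a bit clock in the neighbourhood of 2 MHz, which is why the dividers in the comments above pair off as 400 MHz / 200 and 450 MHz / 225. A trivial sketch of that relationship, assuming the 2 MHz target implied by those pairs; the helper name is made up:

#include <stdio.h>

/* Pick a divider that brings the AUX reference clock close to 2 MHz. */
static int aux_clock_divider(int input_clock_khz)
{
	return input_clock_khz / 2000;
}

int main(void)
{
	printf("SNB/IVB CPU eDP (400 MHz): %d\n", aux_clock_divider(400000)); /* 200 */
	printf("other CPU eDP   (450 MHz): %d\n", aux_clock_divider(450000)); /* 225 */
	return 0;
}
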
@@ -678,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
678 int lane_count, clock; 683 int lane_count, clock;
679 int max_lane_count = intel_dp_max_lane_count(intel_dp); 684 int max_lane_count = intel_dp_max_lane_count(intel_dp);
680 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 685 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
686 int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
681 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 687 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
682 688
683 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { 689 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -695,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
695 for (clock = 0; clock <= max_clock; clock++) { 701 for (clock = 0; clock <= max_clock; clock++) {
696 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 702 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
697 703
698 if (intel_dp_link_required(intel_dp, mode->clock) 704 if (intel_dp_link_required(intel_dp, mode->clock, bpp)
699 <= link_avail) { 705 <= link_avail) {
700 intel_dp->link_bw = bws[clock]; 706 intel_dp->link_bw = bws[clock];
701 intel_dp->lane_count = lane_count; 707 intel_dp->lane_count = lane_count;
@@ -768,12 +774,11 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
768 continue; 774 continue;
769 775
770 intel_dp = enc_to_intel_dp(encoder); 776 intel_dp = enc_to_intel_dp(encoder);
771 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { 777 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
778 intel_dp->base.type == INTEL_OUTPUT_EDP)
779 {
772 lane_count = intel_dp->lane_count; 780 lane_count = intel_dp->lane_count;
773 break; 781 break;
774 } else if (is_edp(intel_dp)) {
775 lane_count = dev_priv->edp.lanes;
776 break;
777 } 782 }
778 } 783 }
779 784
@@ -810,6 +815,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
810 struct drm_display_mode *adjusted_mode) 815 struct drm_display_mode *adjusted_mode)
811{ 816{
812 struct drm_device *dev = encoder->dev; 817 struct drm_device *dev = encoder->dev;
818 struct drm_i915_private *dev_priv = dev->dev_private;
813 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 819 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
814 struct drm_crtc *crtc = intel_dp->base.base.crtc; 820 struct drm_crtc *crtc = intel_dp->base.base.crtc;
815 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 821 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -822,18 +828,32 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
822 ironlake_edp_pll_off(encoder); 828 ironlake_edp_pll_off(encoder);
823 } 829 }
824 830
825 intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 831 /*
826 intel_dp->DP |= intel_dp->color_range; 832 * There are four kinds of DP registers:
833 *
834 * IBX PCH
835 * SNB CPU
836 * IVB CPU
837 * CPT PCH
838 *
839 * IBX PCH and CPU are the same for almost everything,
840 * except that the CPU DP PLL is configured in this
841 * register
842 *
843 * CPT PCH is quite different, having many bits moved
844 * to the TRANS_DP_CTL register instead. That
845 * configuration happens (oddly) in ironlake_pch_enable
846 */
847
848 /* Preserve the BIOS-computed detected bit. This is
849 * supposed to be read-only.
850 */
851 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
852 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
827 853
828 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 854 /* Handle DP bits in common between all three register formats */
829 intel_dp->DP |= DP_SYNC_HS_HIGH;
830 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
831 intel_dp->DP |= DP_SYNC_VS_HIGH;
832 855
833 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 856 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
834 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
835 else
836 intel_dp->DP |= DP_LINK_TRAIN_OFF;
837 857
838 switch (intel_dp->lane_count) { 858 switch (intel_dp->lane_count) {
839 case 1: 859 case 1:
@@ -852,59 +872,124 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
852 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 872 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
853 intel_write_eld(encoder, adjusted_mode); 873 intel_write_eld(encoder, adjusted_mode);
854 } 874 }
855
856 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 875 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
857 intel_dp->link_configuration[0] = intel_dp->link_bw; 876 intel_dp->link_configuration[0] = intel_dp->link_bw;
858 intel_dp->link_configuration[1] = intel_dp->lane_count; 877 intel_dp->link_configuration[1] = intel_dp->lane_count;
859 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; 878 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
860
861 /* 879 /*
862 * Check for DPCD version > 1.1 and enhanced framing support 880 * Check for DPCD version > 1.1 and enhanced framing support
863 */ 881 */
864 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 882 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
865 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { 883 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
866 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 884 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
867 intel_dp->DP |= DP_ENHANCED_FRAMING;
868 } 885 }
869 886
870 /* CPT DP's pipe select is decided in TRANS_DP_CTL */ 887 /* Split out the IBX/CPU vs CPT settings */
871 if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) 888
872 intel_dp->DP |= DP_PIPEB_SELECT; 889 if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
890 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
891 intel_dp->DP |= DP_SYNC_HS_HIGH;
892 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
893 intel_dp->DP |= DP_SYNC_VS_HIGH;
894 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
895
896 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
897 intel_dp->DP |= DP_ENHANCED_FRAMING;
898
899 intel_dp->DP |= intel_crtc->pipe << 29;
873 900
874 if (is_cpu_edp(intel_dp)) {
875 /* don't miss out required setting for eDP */ 901 /* don't miss out required setting for eDP */
876 intel_dp->DP |= DP_PLL_ENABLE; 902 intel_dp->DP |= DP_PLL_ENABLE;
877 if (adjusted_mode->clock < 200000) 903 if (adjusted_mode->clock < 200000)
878 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 904 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
879 else 905 else
880 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 906 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
907 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
908 intel_dp->DP |= intel_dp->color_range;
909
910 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
911 intel_dp->DP |= DP_SYNC_HS_HIGH;
912 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
913 intel_dp->DP |= DP_SYNC_VS_HIGH;
914 intel_dp->DP |= DP_LINK_TRAIN_OFF;
915
916 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
917 intel_dp->DP |= DP_ENHANCED_FRAMING;
918
919 if (intel_crtc->pipe == 1)
920 intel_dp->DP |= DP_PIPEB_SELECT;
921
922 if (is_cpu_edp(intel_dp)) {
923 /* don't miss out required setting for eDP */
924 intel_dp->DP |= DP_PLL_ENABLE;
925 if (adjusted_mode->clock < 200000)
926 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
927 else
928 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
929 }
930 } else {
931 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
881 } 932 }
882} 933}
883 934
884static void ironlake_wait_panel_off(struct intel_dp *intel_dp) 935#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
936#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
937
938#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
939#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
940
941#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
942#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
943
944static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
945 u32 mask,
946 u32 value)
885{ 947{
886 unsigned long off_time; 948 struct drm_device *dev = intel_dp->base.base.dev;
887 unsigned long delay; 949 struct drm_i915_private *dev_priv = dev->dev_private;
888 950
889 DRM_DEBUG_KMS("Wait for panel power off time\n"); 951 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
952 mask, value,
953 I915_READ(PCH_PP_STATUS),
954 I915_READ(PCH_PP_CONTROL));
890 955
891 if (ironlake_edp_have_panel_power(intel_dp) || 956 if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
892 ironlake_edp_have_panel_vdd(intel_dp)) 957 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
893 { 958 I915_READ(PCH_PP_STATUS),
894 DRM_DEBUG_KMS("Panel still on, no delay needed\n"); 959 I915_READ(PCH_PP_CONTROL));
895 return;
896 } 960 }
961}
897 962
898 off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay); 963static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
899 if (time_after(jiffies, off_time)) { 964{
900 DRM_DEBUG_KMS("Time already passed"); 965 DRM_DEBUG_KMS("Wait for panel power on\n");
901 return; 966 ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
902 } 967}
903 delay = jiffies_to_msecs(off_time - jiffies); 968
904 if (delay > intel_dp->panel_power_down_delay) 969static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
905 delay = intel_dp->panel_power_down_delay; 970{
906 DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay); 971 DRM_DEBUG_KMS("Wait for panel power off time\n");
907 msleep(delay); 972 ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
973}
974
975static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
976{
977 DRM_DEBUG_KMS("Wait for panel power cycle\n");
978 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
979}
980
981
982/* Read the current pp_control value, unlocking the register if it
983 * is locked
984 */
985
986static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
987{
988 u32 control = I915_READ(PCH_PP_CONTROL);
989
990 control &= ~PANEL_UNLOCK_MASK;
991 control |= PANEL_UNLOCK_REGS;
992 return control;
908} 993}
909 994
910static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) 995static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
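
ironlake_wait_panel_status() above collapses the separate "wait for panel on / off / power cycle" cases into one mask-and-expected-value poll of PCH_PP_STATUS, with the three IDLE_* mask/value pairs selecting the target state. A user-space sketch of that generic poll-until-(reg & mask) == value helper with a timeout; the register read is faked and the mask/value constants are placeholders, not the real PP_* bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fake "hardware": the status register reaches the target state after a few
 * polls, standing in for the panel power sequencer making progress. */
static uint32_t fake_status_reg(void)
{
	static int polls;
	return (++polls >= 3) ? 0x80000008u : 0x30000000u;
}

/* Poll until (status & mask) == value or the poll budget runs out.  The
 * driver does the same with _wait_for(..., 5000, 10) on PCH_PP_STATUS and
 * logs an error on timeout instead of returning a flag. */
static bool wait_panel_status(uint32_t mask, uint32_t value, int max_polls)
{
	while (max_polls-- > 0) {
		uint32_t status = fake_status_reg();
		if ((status & mask) == value)
			return true;
		/* the real code sleeps ~10 ms between reads */
	}
	return false;
}

int main(void)
{
	/* e.g. "panel fully on": power bit set, sequencer idle (placeholder bits) */
	bool ok = wait_panel_status(0xf0000008u, 0x80000008u, 500);
	printf("panel reached target state: %s\n", ok ? "yes" : "timeout");
	return 0;
}
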
@@ -921,15 +1006,16 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
921 "eDP VDD already requested on\n"); 1006 "eDP VDD already requested on\n");
922 1007
923 intel_dp->want_panel_vdd = true; 1008 intel_dp->want_panel_vdd = true;
1009
924 if (ironlake_edp_have_panel_vdd(intel_dp)) { 1010 if (ironlake_edp_have_panel_vdd(intel_dp)) {
925 DRM_DEBUG_KMS("eDP VDD already on\n"); 1011 DRM_DEBUG_KMS("eDP VDD already on\n");
926 return; 1012 return;
927 } 1013 }
928 1014
929 ironlake_wait_panel_off(intel_dp); 1015 if (!ironlake_edp_have_panel_power(intel_dp))
930 pp = I915_READ(PCH_PP_CONTROL); 1016 ironlake_wait_panel_power_cycle(intel_dp);
931 pp &= ~PANEL_UNLOCK_MASK; 1017
932 pp |= PANEL_UNLOCK_REGS; 1018 pp = ironlake_get_pp_control(dev_priv);
933 pp |= EDP_FORCE_VDD; 1019 pp |= EDP_FORCE_VDD;
934 I915_WRITE(PCH_PP_CONTROL, pp); 1020 I915_WRITE(PCH_PP_CONTROL, pp);
935 POSTING_READ(PCH_PP_CONTROL); 1021 POSTING_READ(PCH_PP_CONTROL);
@@ -952,9 +1038,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
952 u32 pp; 1038 u32 pp;
953 1039
954 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1040 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
955 pp = I915_READ(PCH_PP_CONTROL); 1041 pp = ironlake_get_pp_control(dev_priv);
956 pp &= ~PANEL_UNLOCK_MASK;
957 pp |= PANEL_UNLOCK_REGS;
958 pp &= ~EDP_FORCE_VDD; 1042 pp &= ~EDP_FORCE_VDD;
959 I915_WRITE(PCH_PP_CONTROL, pp); 1043 I915_WRITE(PCH_PP_CONTROL, pp);
960 POSTING_READ(PCH_PP_CONTROL); 1044 POSTING_READ(PCH_PP_CONTROL);
@@ -962,7 +1046,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
962 /* Make sure sequencer is idle before allowing subsequent activity */ 1046 /* Make sure sequencer is idle before allowing subsequent activity */
963 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", 1047 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
964 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); 1048 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
965 intel_dp->panel_off_jiffies = jiffies; 1049
1050 msleep(intel_dp->panel_power_down_delay);
966 } 1051 }
967} 1052}
968 1053
@@ -972,9 +1057,9 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
972 struct intel_dp, panel_vdd_work); 1057 struct intel_dp, panel_vdd_work);
973 struct drm_device *dev = intel_dp->base.base.dev; 1058 struct drm_device *dev = intel_dp->base.base.dev;
974 1059
975 mutex_lock(&dev->struct_mutex); 1060 mutex_lock(&dev->mode_config.mutex);
976 ironlake_panel_vdd_off_sync(intel_dp); 1061 ironlake_panel_vdd_off_sync(intel_dp);
977 mutex_unlock(&dev->struct_mutex); 1062 mutex_unlock(&dev->mode_config.mutex);
978} 1063}
979 1064
980static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1065static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
@@ -984,7 +1069,7 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
984 1069
985 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); 1070 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
986 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); 1071 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
987 1072
988 intel_dp->want_panel_vdd = false; 1073 intel_dp->want_panel_vdd = false;
989 1074
990 if (sync) { 1075 if (sync) {
@@ -1000,23 +1085,25 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1000 } 1085 }
1001} 1086}
1002 1087
1003/* Returns true if the panel was already on when called */
1004static void ironlake_edp_panel_on(struct intel_dp *intel_dp) 1088static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1005{ 1089{
1006 struct drm_device *dev = intel_dp->base.base.dev; 1090 struct drm_device *dev = intel_dp->base.base.dev;
1007 struct drm_i915_private *dev_priv = dev->dev_private; 1091 struct drm_i915_private *dev_priv = dev->dev_private;
1008 u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; 1092 u32 pp;
1009 1093
1010 if (!is_edp(intel_dp)) 1094 if (!is_edp(intel_dp))
1011 return; 1095 return;
1012 if (ironlake_edp_have_panel_power(intel_dp)) 1096
1097 DRM_DEBUG_KMS("Turn eDP power on\n");
1098
1099 if (ironlake_edp_have_panel_power(intel_dp)) {
1100 DRM_DEBUG_KMS("eDP power already on\n");
1013 return; 1101 return;
1102 }
1014 1103
1015 ironlake_wait_panel_off(intel_dp); 1104 ironlake_wait_panel_power_cycle(intel_dp);
1016 pp = I915_READ(PCH_PP_CONTROL);
1017 pp &= ~PANEL_UNLOCK_MASK;
1018 pp |= PANEL_UNLOCK_REGS;
1019 1105
1106 pp = ironlake_get_pp_control(dev_priv);
1020 if (IS_GEN5(dev)) { 1107 if (IS_GEN5(dev)) {
1021 /* ILK workaround: disable reset around power sequence */ 1108 /* ILK workaround: disable reset around power sequence */
1022 pp &= ~PANEL_POWER_RESET; 1109 pp &= ~PANEL_POWER_RESET;
@@ -1025,13 +1112,13 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1025 } 1112 }
1026 1113
1027 pp |= POWER_TARGET_ON; 1114 pp |= POWER_TARGET_ON;
1115 if (!IS_GEN5(dev))
1116 pp |= PANEL_POWER_RESET;
1117
1028 I915_WRITE(PCH_PP_CONTROL, pp); 1118 I915_WRITE(PCH_PP_CONTROL, pp);
1029 POSTING_READ(PCH_PP_CONTROL); 1119 POSTING_READ(PCH_PP_CONTROL);
1030 1120
1031 if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, 1121 ironlake_wait_panel_on(intel_dp);
1032 5000))
1033 DRM_ERROR("panel on wait timed out: 0x%08x\n",
1034 I915_READ(PCH_PP_STATUS));
1035 1122
1036 if (IS_GEN5(dev)) { 1123 if (IS_GEN5(dev)) {
1037 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1124 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@@ -1040,46 +1127,25 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1040 } 1127 }
1041} 1128}
1042 1129
1043static void ironlake_edp_panel_off(struct drm_encoder *encoder) 1130static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1044{ 1131{
1045 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1132 struct drm_device *dev = intel_dp->base.base.dev;
1046 struct drm_device *dev = encoder->dev;
1047 struct drm_i915_private *dev_priv = dev->dev_private; 1133 struct drm_i915_private *dev_priv = dev->dev_private;
1048 u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | 1134 u32 pp;
1049 PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
1050 1135
1051 if (!is_edp(intel_dp)) 1136 if (!is_edp(intel_dp))
1052 return; 1137 return;
1053 pp = I915_READ(PCH_PP_CONTROL);
1054 pp &= ~PANEL_UNLOCK_MASK;
1055 pp |= PANEL_UNLOCK_REGS;
1056 1138
1057 if (IS_GEN5(dev)) { 1139 DRM_DEBUG_KMS("Turn eDP power off\n");
1058 /* ILK workaround: disable reset around power sequence */
1059 pp &= ~PANEL_POWER_RESET;
1060 I915_WRITE(PCH_PP_CONTROL, pp);
1061 POSTING_READ(PCH_PP_CONTROL);
1062 }
1063 1140
1064 intel_dp->panel_off_jiffies = jiffies; 1141 WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
1065 1142
1066 if (IS_GEN5(dev)) { 1143 pp = ironlake_get_pp_control(dev_priv);
1067 pp &= ~POWER_TARGET_ON; 1144 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
1068 I915_WRITE(PCH_PP_CONTROL, pp); 1145 I915_WRITE(PCH_PP_CONTROL, pp);
1069 POSTING_READ(PCH_PP_CONTROL); 1146 POSTING_READ(PCH_PP_CONTROL);
1070 pp &= ~POWER_TARGET_ON;
1071 I915_WRITE(PCH_PP_CONTROL, pp);
1072 POSTING_READ(PCH_PP_CONTROL);
1073 msleep(intel_dp->panel_power_cycle_delay);
1074
1075 if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
1076 DRM_ERROR("panel off wait timed out: 0x%08x\n",
1077 I915_READ(PCH_PP_STATUS));
1078 1147
1079 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1148 ironlake_wait_panel_off(intel_dp);
1080 I915_WRITE(PCH_PP_CONTROL, pp);
1081 POSTING_READ(PCH_PP_CONTROL);
1082 }
1083} 1149}
1084 1150
1085static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) 1151static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1099,9 +1165,7 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1099 * allowing it to appear. 1165 * allowing it to appear.
1100 */ 1166 */
1101 msleep(intel_dp->backlight_on_delay); 1167 msleep(intel_dp->backlight_on_delay);
1102 pp = I915_READ(PCH_PP_CONTROL); 1168 pp = ironlake_get_pp_control(dev_priv);
1103 pp &= ~PANEL_UNLOCK_MASK;
1104 pp |= PANEL_UNLOCK_REGS;
1105 pp |= EDP_BLC_ENABLE; 1169 pp |= EDP_BLC_ENABLE;
1106 I915_WRITE(PCH_PP_CONTROL, pp); 1170 I915_WRITE(PCH_PP_CONTROL, pp);
1107 POSTING_READ(PCH_PP_CONTROL); 1171 POSTING_READ(PCH_PP_CONTROL);
@@ -1117,9 +1181,7 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1117 return; 1181 return;
1118 1182
1119 DRM_DEBUG_KMS("\n"); 1183 DRM_DEBUG_KMS("\n");
1120 pp = I915_READ(PCH_PP_CONTROL); 1184 pp = ironlake_get_pp_control(dev_priv);
1121 pp &= ~PANEL_UNLOCK_MASK;
1122 pp |= PANEL_UNLOCK_REGS;
1123 pp &= ~EDP_BLC_ENABLE; 1185 pp &= ~EDP_BLC_ENABLE;
1124 I915_WRITE(PCH_PP_CONTROL, pp); 1186 I915_WRITE(PCH_PP_CONTROL, pp);
1125 POSTING_READ(PCH_PP_CONTROL); 1187 POSTING_READ(PCH_PP_CONTROL);
@@ -1187,17 +1249,18 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
1187{ 1249{
1188 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1250 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1189 1251
1252 ironlake_edp_backlight_off(intel_dp);
1253 ironlake_edp_panel_off(intel_dp);
1254
1190 /* Wake up the sink first */ 1255 /* Wake up the sink first */
1191 ironlake_edp_panel_vdd_on(intel_dp); 1256 ironlake_edp_panel_vdd_on(intel_dp);
1192 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1257 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1258 intel_dp_link_down(intel_dp);
1193 ironlake_edp_panel_vdd_off(intel_dp, false); 1259 ironlake_edp_panel_vdd_off(intel_dp, false);
1194 1260
1195 /* Make sure the panel is off before trying to 1261 /* Make sure the panel is off before trying to
1196 * change the mode 1262 * change the mode
1197 */ 1263 */
1198 ironlake_edp_backlight_off(intel_dp);
1199 intel_dp_link_down(intel_dp);
1200 ironlake_edp_panel_off(encoder);
1201} 1264}
1202 1265
1203static void intel_dp_commit(struct drm_encoder *encoder) 1266static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1211,7 +1274,6 @@ static void intel_dp_commit(struct drm_encoder *encoder)
1211 intel_dp_start_link_train(intel_dp); 1274 intel_dp_start_link_train(intel_dp);
1212 ironlake_edp_panel_on(intel_dp); 1275 ironlake_edp_panel_on(intel_dp);
1213 ironlake_edp_panel_vdd_off(intel_dp, true); 1276 ironlake_edp_panel_vdd_off(intel_dp, true);
1214
1215 intel_dp_complete_link_train(intel_dp); 1277 intel_dp_complete_link_train(intel_dp);
1216 ironlake_edp_backlight_on(intel_dp); 1278 ironlake_edp_backlight_on(intel_dp);
1217 1279
@@ -1230,16 +1292,20 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
1230 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 1292 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
1231 1293
1232 if (mode != DRM_MODE_DPMS_ON) { 1294 if (mode != DRM_MODE_DPMS_ON) {
1295 ironlake_edp_backlight_off(intel_dp);
1296 ironlake_edp_panel_off(intel_dp);
1297
1233 ironlake_edp_panel_vdd_on(intel_dp); 1298 ironlake_edp_panel_vdd_on(intel_dp);
1234 if (is_edp(intel_dp))
1235 ironlake_edp_backlight_off(intel_dp);
1236 intel_dp_sink_dpms(intel_dp, mode); 1299 intel_dp_sink_dpms(intel_dp, mode);
1237 intel_dp_link_down(intel_dp); 1300 intel_dp_link_down(intel_dp);
1238 ironlake_edp_panel_off(encoder);
1239 if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
1240 ironlake_edp_pll_off(encoder);
1241 ironlake_edp_panel_vdd_off(intel_dp, false); 1301 ironlake_edp_panel_vdd_off(intel_dp, false);
1302
1303 if (is_cpu_edp(intel_dp))
1304 ironlake_edp_pll_off(encoder);
1242 } else { 1305 } else {
1306 if (is_cpu_edp(intel_dp))
1307 ironlake_edp_pll_on(encoder);
1308
1243 ironlake_edp_panel_vdd_on(intel_dp); 1309 ironlake_edp_panel_vdd_on(intel_dp);
1244 intel_dp_sink_dpms(intel_dp, mode); 1310 intel_dp_sink_dpms(intel_dp, mode);
1245 if (!(dp_reg & DP_PORT_EN)) { 1311 if (!(dp_reg & DP_PORT_EN)) {
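
The dpms rework above makes the eDP power ordering explicit: backlight off and panel power off happen first, then VDD is forced on so AUX still works while the sink is put to sleep and the link is torn down, and the CPU eDP PLL is only touched at the very ends of the sequence. A schematic sketch of that ordering, purely as labelled steps taken from the reworked paths; the functions just print what the driver would do:

#include <stdio.h>

static void step(const char *what) { printf("  %s\n", what); }

/* Order taken from the reworked intel_dp_dpms() paths in the hunk above. */
static void edp_power_down(void)
{
	puts("power down:");
	step("backlight off");
	step("panel power off");
	step("force VDD on (AUX still needs power)");
	step("sink DPMS off");
	step("link down");
	step("release VDD");
	step("eDP PLL off (CPU eDP only)");
}

static void edp_power_up(void)
{
	puts("power up:");
	step("eDP PLL on (CPU eDP only)");
	step("force VDD on");
	step("sink DPMS on");
	step("start link training, panel power on, release VDD");
	step("complete link training");
	step("backlight on");
}

int main(void)
{
	edp_power_down();
	edp_power_up();
	return 0;
}
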
@@ -1247,7 +1313,6 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
1247 ironlake_edp_panel_on(intel_dp); 1313 ironlake_edp_panel_on(intel_dp);
1248 ironlake_edp_panel_vdd_off(intel_dp, true); 1314 ironlake_edp_panel_vdd_off(intel_dp, true);
1249 intel_dp_complete_link_train(intel_dp); 1315 intel_dp_complete_link_train(intel_dp);
1250 ironlake_edp_backlight_on(intel_dp);
1251 } else 1316 } else
1252 ironlake_edp_panel_vdd_off(intel_dp, false); 1317 ironlake_edp_panel_vdd_off(intel_dp, false);
1253 ironlake_edp_backlight_on(intel_dp); 1318 ironlake_edp_backlight_on(intel_dp);
@@ -1285,11 +1350,11 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1285 * link status information 1350 * link status information
1286 */ 1351 */
1287static bool 1352static bool
1288intel_dp_get_link_status(struct intel_dp *intel_dp) 1353intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1289{ 1354{
1290 return intel_dp_aux_native_read_retry(intel_dp, 1355 return intel_dp_aux_native_read_retry(intel_dp,
1291 DP_LANE0_1_STATUS, 1356 DP_LANE0_1_STATUS,
1292 intel_dp->link_status, 1357 link_status,
1293 DP_LINK_STATUS_SIZE); 1358 DP_LINK_STATUS_SIZE);
1294} 1359}
1295 1360
@@ -1301,27 +1366,25 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1301} 1366}
1302 1367
1303static uint8_t 1368static uint8_t
1304intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], 1369intel_get_adjust_request_voltage(uint8_t adjust_request[2],
1305 int lane) 1370 int lane)
1306{ 1371{
1307 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
1308 int s = ((lane & 1) ? 1372 int s = ((lane & 1) ?
1309 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : 1373 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
1310 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); 1374 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
1311 uint8_t l = intel_dp_link_status(link_status, i); 1375 uint8_t l = adjust_request[lane>>1];
1312 1376
1313 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; 1377 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
1314} 1378}
1315 1379
1316static uint8_t 1380static uint8_t
1317intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], 1381intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
1318 int lane) 1382 int lane)
1319{ 1383{
1320 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
1321 int s = ((lane & 1) ? 1384 int s = ((lane & 1) ?
1322 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : 1385 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
1323 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); 1386 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
1324 uint8_t l = intel_dp_link_status(link_status, i); 1387 uint8_t l = adjust_request[lane>>1];
1325 1388
1326 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; 1389 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
1327} 1390}
@@ -1343,34 +1406,63 @@ static char *link_train_names[] = {
1343 * These are source-specific values; current Intel hardware supports 1406 * These are source-specific values; current Intel hardware supports
1344 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1407 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1345 */ 1408 */
1346#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
1347 1409
1348static uint8_t 1410static uint8_t
1349intel_dp_pre_emphasis_max(uint8_t voltage_swing) 1411intel_dp_voltage_max(struct intel_dp *intel_dp)
1350{ 1412{
1351 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1413 struct drm_device *dev = intel_dp->base.base.dev;
1352 case DP_TRAIN_VOLTAGE_SWING_400: 1414
1353 return DP_TRAIN_PRE_EMPHASIS_6; 1415 if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
1354 case DP_TRAIN_VOLTAGE_SWING_600: 1416 return DP_TRAIN_VOLTAGE_SWING_800;
1355 return DP_TRAIN_PRE_EMPHASIS_6; 1417 else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
1356 case DP_TRAIN_VOLTAGE_SWING_800: 1418 return DP_TRAIN_VOLTAGE_SWING_1200;
1357 return DP_TRAIN_PRE_EMPHASIS_3_5; 1419 else
1358 case DP_TRAIN_VOLTAGE_SWING_1200: 1420 return DP_TRAIN_VOLTAGE_SWING_800;
1359 default: 1421}
1360 return DP_TRAIN_PRE_EMPHASIS_0; 1422
1423static uint8_t
1424intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1425{
1426 struct drm_device *dev = intel_dp->base.base.dev;
1427
1428 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1429 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1430 case DP_TRAIN_VOLTAGE_SWING_400:
1431 return DP_TRAIN_PRE_EMPHASIS_6;
1432 case DP_TRAIN_VOLTAGE_SWING_600:
1433 case DP_TRAIN_VOLTAGE_SWING_800:
1434 return DP_TRAIN_PRE_EMPHASIS_3_5;
1435 default:
1436 return DP_TRAIN_PRE_EMPHASIS_0;
1437 }
1438 } else {
1439 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1440 case DP_TRAIN_VOLTAGE_SWING_400:
1441 return DP_TRAIN_PRE_EMPHASIS_6;
1442 case DP_TRAIN_VOLTAGE_SWING_600:
1443 return DP_TRAIN_PRE_EMPHASIS_6;
1444 case DP_TRAIN_VOLTAGE_SWING_800:
1445 return DP_TRAIN_PRE_EMPHASIS_3_5;
1446 case DP_TRAIN_VOLTAGE_SWING_1200:
1447 default:
1448 return DP_TRAIN_PRE_EMPHASIS_0;
1449 }
1361 } 1450 }
1362} 1451}
1363 1452
1364static void 1453static void
1365intel_get_adjust_train(struct intel_dp *intel_dp) 1454intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1366{ 1455{
1367 uint8_t v = 0; 1456 uint8_t v = 0;
1368 uint8_t p = 0; 1457 uint8_t p = 0;
1369 int lane; 1458 int lane;
1459 uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
1460 uint8_t voltage_max;
1461 uint8_t preemph_max;
1370 1462
1371 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1463 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1372 uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane); 1464 uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
1373 uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); 1465 uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
1374 1466
1375 if (this_v > v) 1467 if (this_v > v)
1376 v = this_v; 1468 v = this_v;
@@ -1378,18 +1470,20 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
1378 p = this_p; 1470 p = this_p;
1379 } 1471 }
1380 1472
1381 if (v >= I830_DP_VOLTAGE_MAX) 1473 voltage_max = intel_dp_voltage_max(intel_dp);
1382 v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; 1474 if (v >= voltage_max)
1475 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
1383 1476
1384 if (p >= intel_dp_pre_emphasis_max(v)) 1477 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
1385 p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 1478 if (p >= preemph_max)
1479 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1386 1480
1387 for (lane = 0; lane < 4; lane++) 1481 for (lane = 0; lane < 4; lane++)
1388 intel_dp->train_set[lane] = v | p; 1482 intel_dp->train_set[lane] = v | p;
1389} 1483}
1390 1484
1391static uint32_t 1485static uint32_t
1392intel_dp_signal_levels(uint8_t train_set, int lane_count) 1486intel_dp_signal_levels(uint8_t train_set)
1393{ 1487{
1394 uint32_t signal_levels = 0; 1488 uint32_t signal_levels = 0;
1395 1489
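
intel_get_adjust_train() now asks two per-platform helpers for the maximum voltage swing and pre-emphasis the source can drive, and clamps the sink's request while setting the "max reached" flags so the sink stops asking for more. A small sketch of just that clamp step; the 2-bit field plus flag layout mirrors the DP training fields, but the constants are simplified stand-ins:

#include <stdint.h>
#include <stdio.h>

#define SWING_MASK        0x3
#define MAX_SWING_REACHED 0x4   /* simplified stand-in for DP_TRAIN_MAX_SWING_REACHED */

/* Clamp the highest per-lane request to what this source supports and flag
 * that the ceiling was hit. */
static uint8_t clamp_swing(uint8_t requested, uint8_t source_max)
{
	if ((requested & SWING_MASK) >= source_max)
		return source_max | MAX_SWING_REACHED;
	return requested;
}

int main(void)
{
	uint8_t source_max = 0x2;   /* e.g. 800 mV is this source's limit */
	printf("req 0x1 -> 0x%x\n", clamp_swing(0x1, source_max));
	printf("req 0x3 -> 0x%x\n", clamp_swing(0x3, source_max)); /* clamped + flagged */
	return 0;
}
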
@@ -1454,13 +1548,43 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
1454 } 1548 }
1455} 1549}
1456 1550
1551/* Gen7's DP voltage swing and pre-emphasis control */
1552static uint32_t
1553intel_gen7_edp_signal_levels(uint8_t train_set)
1554{
1555 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1556 DP_TRAIN_PRE_EMPHASIS_MASK);
1557 switch (signal_levels) {
1558 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1559 return EDP_LINK_TRAIN_400MV_0DB_IVB;
1560 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1561 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
1562 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1563 return EDP_LINK_TRAIN_400MV_6DB_IVB;
1564
1565 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1566 return EDP_LINK_TRAIN_600MV_0DB_IVB;
1567 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1568 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
1569
1570 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1571 return EDP_LINK_TRAIN_800MV_0DB_IVB;
1572 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1573 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
1574
1575 default:
1576 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1577 "0x%x\n", signal_levels);
1578 return EDP_LINK_TRAIN_500MV_0DB_IVB;
1579 }
1580}
1581
1457static uint8_t 1582static uint8_t
1458intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], 1583intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1459 int lane) 1584 int lane)
1460{ 1585{
1461 int i = DP_LANE0_1_STATUS + (lane >> 1);
1462 int s = (lane & 1) * 4; 1586 int s = (lane & 1) * 4;
1463 uint8_t l = intel_dp_link_status(link_status, i); 1587 uint8_t l = link_status[lane>>1];
1464 1588
1465 return (l >> s) & 0xf; 1589 return (l >> s) & 0xf;
1466} 1590}
@@ -1485,18 +1609,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
1485 DP_LANE_CHANNEL_EQ_DONE|\ 1609 DP_LANE_CHANNEL_EQ_DONE|\
1486 DP_LANE_SYMBOL_LOCKED) 1610 DP_LANE_SYMBOL_LOCKED)
1487static bool 1611static bool
1488intel_channel_eq_ok(struct intel_dp *intel_dp) 1612intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1489{ 1613{
1490 uint8_t lane_align; 1614 uint8_t lane_align;
1491 uint8_t lane_status; 1615 uint8_t lane_status;
1492 int lane; 1616 int lane;
1493 1617
1494 lane_align = intel_dp_link_status(intel_dp->link_status, 1618 lane_align = intel_dp_link_status(link_status,
1495 DP_LANE_ALIGN_STATUS_UPDATED); 1619 DP_LANE_ALIGN_STATUS_UPDATED);
1496 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) 1620 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
1497 return false; 1621 return false;
1498 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1622 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1499 lane_status = intel_get_lane_status(intel_dp->link_status, lane); 1623 lane_status = intel_get_lane_status(link_status, lane);
1500 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) 1624 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
1501 return false; 1625 return false;
1502 } 1626 }
@@ -1521,8 +1645,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1521 1645
1522 ret = intel_dp_aux_native_write(intel_dp, 1646 ret = intel_dp_aux_native_write(intel_dp,
1523 DP_TRAINING_LANE0_SET, 1647 DP_TRAINING_LANE0_SET,
1524 intel_dp->train_set, 4); 1648 intel_dp->train_set,
1525 if (ret != 4) 1649 intel_dp->lane_count);
1650 if (ret != intel_dp->lane_count)
1526 return false; 1651 return false;
1527 1652
1528 return true; 1653 return true;
@@ -1538,7 +1663,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1538 int i; 1663 int i;
1539 uint8_t voltage; 1664 uint8_t voltage;
1540 bool clock_recovery = false; 1665 bool clock_recovery = false;
1541 int tries; 1666 int voltage_tries, loop_tries;
1542 u32 reg; 1667 u32 reg;
1543 uint32_t DP = intel_dp->DP; 1668 uint32_t DP = intel_dp->DP;
1544 1669
@@ -1559,26 +1684,35 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1559 DP_LINK_CONFIGURATION_SIZE); 1684 DP_LINK_CONFIGURATION_SIZE);
1560 1685
1561 DP |= DP_PORT_EN; 1686 DP |= DP_PORT_EN;
1562 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1687
1688 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1563 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1689 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1564 else 1690 else
1565 DP &= ~DP_LINK_TRAIN_MASK; 1691 DP &= ~DP_LINK_TRAIN_MASK;
1566 memset(intel_dp->train_set, 0, 4); 1692 memset(intel_dp->train_set, 0, 4);
1567 voltage = 0xff; 1693 voltage = 0xff;
1568 tries = 0; 1694 voltage_tries = 0;
1695 loop_tries = 0;
1569 clock_recovery = false; 1696 clock_recovery = false;
1570 for (;;) { 1697 for (;;) {
1571 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1698 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1699 uint8_t link_status[DP_LINK_STATUS_SIZE];
1572 uint32_t signal_levels; 1700 uint32_t signal_levels;
1573 if (IS_GEN6(dev) && is_edp(intel_dp)) { 1701
1702
1703 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1704 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1705 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1706 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1574 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1707 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1575 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1708 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1576 } else { 1709 } else {
1577 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); 1710 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1711 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
1578 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1712 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1579 } 1713 }
1580 1714
1581 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1715 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1582 reg = DP | DP_LINK_TRAIN_PAT_1_CPT; 1716 reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
1583 else 1717 else
1584 reg = DP | DP_LINK_TRAIN_PAT_1; 1718 reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1590,10 +1724,13 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1590 /* Set training pattern 1 */ 1724 /* Set training pattern 1 */
1591 1725
1592 udelay(100); 1726 udelay(100);
1593 if (!intel_dp_get_link_status(intel_dp)) 1727 if (!intel_dp_get_link_status(intel_dp, link_status)) {
1728 DRM_ERROR("failed to get link status\n");
1594 break; 1729 break;
1730 }
1595 1731
1596 if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { 1732 if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1733 DRM_DEBUG_KMS("clock recovery OK\n");
1597 clock_recovery = true; 1734 clock_recovery = true;
1598 break; 1735 break;
1599 } 1736 }
@@ -1602,20 +1739,30 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1602 for (i = 0; i < intel_dp->lane_count; i++) 1739 for (i = 0; i < intel_dp->lane_count; i++)
1603 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1740 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1604 break; 1741 break;
1605 if (i == intel_dp->lane_count) 1742 if (i == intel_dp->lane_count) {
1606 break; 1743 ++loop_tries;
1744 if (loop_tries == 5) {
1745 DRM_DEBUG_KMS("too many full retries, give up\n");
1746 break;
1747 }
1748 memset(intel_dp->train_set, 0, 4);
1749 voltage_tries = 0;
1750 continue;
1751 }
1607 1752
1608 /* Check to see if we've tried the same voltage 5 times */ 1753 /* Check to see if we've tried the same voltage 5 times */
1609 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1754 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
1610 ++tries; 1755 ++voltage_tries;
1611 if (tries == 5) 1756 if (voltage_tries == 5) {
1757 DRM_DEBUG_KMS("too many voltage retries, give up\n");
1612 break; 1758 break;
1759 }
1613 } else 1760 } else
1614 tries = 0; 1761 voltage_tries = 0;
1615 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1762 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1616 1763
1617 /* Compute new intel_dp->train_set as requested by target */ 1764 /* Compute new intel_dp->train_set as requested by target */
1618 intel_get_adjust_train(intel_dp); 1765 intel_get_adjust_train(intel_dp, link_status);
1619 } 1766 }
1620 1767
1621 intel_dp->DP = DP; 1768 intel_dp->DP = DP;
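
Clock-recovery training now tracks two failure budgets: voltage_tries counts how often the same swing was retried without progress, and loop_tries counts full restarts after every lane has hit max swing (at which point the training set is zeroed and the loop continues). A condensed sketch of that control flow; train_once() is a stand-in for one pattern-1 iteration and the "every third attempt hits max swing" rule is purely for demonstration:

#include <stdbool.h>
#include <stdio.h>

/* Fake single training step: succeeds on the 7th attempt overall. */
static bool train_once(int attempt) { return attempt >= 7; }

static bool clock_recovery(void)
{
	int voltage_tries = 0, loop_tries = 0, attempt = 0;

	for (;;) {
		++attempt;
		if (train_once(attempt))
			return true;                 /* clock recovery OK */

		/* Pretend every third attempt ends with all lanes at max swing. */
		if (attempt % 3 == 0) {
			if (++loop_tries == 5) {
				puts("too many full retries, give up");
				return false;
			}
			voltage_tries = 0;           /* restart from a zeroed train_set */
			continue;
		}

		/* Same voltage requested again without progress. */
		if (++voltage_tries == 5) {
			puts("too many voltage retries, give up");
			return false;
		}
	}
}

int main(void)
{
	printf("clock recovery %s\n", clock_recovery() ? "succeeded" : "failed");
	return 0;
}
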
@@ -1638,6 +1785,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1638 for (;;) { 1785 for (;;) {
1639 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1786 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1640 uint32_t signal_levels; 1787 uint32_t signal_levels;
1788 uint8_t link_status[DP_LINK_STATUS_SIZE];
1641 1789
1642 if (cr_tries > 5) { 1790 if (cr_tries > 5) {
1643 DRM_ERROR("failed to train DP, aborting\n"); 1791 DRM_ERROR("failed to train DP, aborting\n");
@@ -1645,15 +1793,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1645 break; 1793 break;
1646 } 1794 }
1647 1795
1648 if (IS_GEN6(dev) && is_edp(intel_dp)) { 1796 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1797 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1798 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1799 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1649 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1800 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1650 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1801 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1651 } else { 1802 } else {
1652 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); 1803 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1653 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1804 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1654 } 1805 }
1655 1806
1656 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1807 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1657 reg = DP | DP_LINK_TRAIN_PAT_2_CPT; 1808 reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
1658 else 1809 else
1659 reg = DP | DP_LINK_TRAIN_PAT_2; 1810 reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1665,17 +1816,17 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1665 break; 1816 break;
1666 1817
1667 udelay(400); 1818 udelay(400);
1668 if (!intel_dp_get_link_status(intel_dp)) 1819 if (!intel_dp_get_link_status(intel_dp, link_status))
1669 break; 1820 break;
1670 1821
1671 /* Make sure clock is still ok */ 1822 /* Make sure clock is still ok */
1672 if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { 1823 if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1673 intel_dp_start_link_train(intel_dp); 1824 intel_dp_start_link_train(intel_dp);
1674 cr_tries++; 1825 cr_tries++;
1675 continue; 1826 continue;
1676 } 1827 }
1677 1828
1678 if (intel_channel_eq_ok(intel_dp)) { 1829 if (intel_channel_eq_ok(intel_dp, link_status)) {
1679 channel_eq = true; 1830 channel_eq = true;
1680 break; 1831 break;
1681 } 1832 }
@@ -1690,11 +1841,11 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1690 } 1841 }
1691 1842
1692 /* Compute new intel_dp->train_set as requested by target */ 1843 /* Compute new intel_dp->train_set as requested by target */
1693 intel_get_adjust_train(intel_dp); 1844 intel_get_adjust_train(intel_dp, link_status);
1694 ++tries; 1845 ++tries;
1695 } 1846 }
1696 1847
1697 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1848 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1698 reg = DP | DP_LINK_TRAIN_OFF_CPT; 1849 reg = DP | DP_LINK_TRAIN_OFF_CPT;
1699 else 1850 else
1700 reg = DP | DP_LINK_TRAIN_OFF; 1851 reg = DP | DP_LINK_TRAIN_OFF;
@@ -1724,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1724 udelay(100); 1875 udelay(100);
1725 } 1876 }
1726 1877
1727 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) { 1878 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
1728 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1879 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1729 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 1880 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
1730 } else { 1881 } else {
@@ -1735,8 +1886,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1735 1886
1736 msleep(17); 1887 msleep(17);
1737 1888
1738 if (is_edp(intel_dp)) 1889 if (is_edp(intel_dp)) {
1739 DP |= DP_LINK_TRAIN_OFF; 1890 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1891 DP |= DP_LINK_TRAIN_OFF_CPT;
1892 else
1893 DP |= DP_LINK_TRAIN_OFF;
1894 }
1740 1895
1741 if (!HAS_PCH_CPT(dev) && 1896 if (!HAS_PCH_CPT(dev) &&
1742 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 1897 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -1822,6 +1977,7 @@ static void
1822intel_dp_check_link_status(struct intel_dp *intel_dp) 1977intel_dp_check_link_status(struct intel_dp *intel_dp)
1823{ 1978{
1824 u8 sink_irq_vector; 1979 u8 sink_irq_vector;
1980 u8 link_status[DP_LINK_STATUS_SIZE];
1825 1981
1826 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) 1982 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
1827 return; 1983 return;
@@ -1830,7 +1986,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
1830 return; 1986 return;
1831 1987
1832 /* Try to read receiver status if the link appears to be up */ 1988 /* Try to read receiver status if the link appears to be up */
1833 if (!intel_dp_get_link_status(intel_dp)) { 1989 if (!intel_dp_get_link_status(intel_dp, link_status)) {
1834 intel_dp_link_down(intel_dp); 1990 intel_dp_link_down(intel_dp);
1835 return; 1991 return;
1836 } 1992 }
@@ -1855,7 +2011,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
1855 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 2011 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
1856 } 2012 }
1857 2013
1858 if (!intel_channel_eq_ok(intel_dp)) { 2014 if (!intel_channel_eq_ok(intel_dp, link_status)) {
1859 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 2015 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
1860 drm_get_encoder_name(&intel_dp->base.base)); 2016 drm_get_encoder_name(&intel_dp->base.base));
1861 intel_dp_start_link_train(intel_dp); 2017 intel_dp_start_link_train(intel_dp);
@@ -2179,7 +2335,8 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
2179 continue; 2335 continue;
2180 2336
2181 intel_dp = enc_to_intel_dp(encoder); 2337 intel_dp = enc_to_intel_dp(encoder);
2182 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) 2338 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
2339 intel_dp->base.type == INTEL_OUTPUT_EDP)
2183 return intel_dp->output_reg; 2340 return intel_dp->output_reg;
2184 } 2341 }
2185 2342
@@ -2321,7 +2478,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2321 2478
2322 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 2479 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
2323 PANEL_LIGHT_ON_DELAY_SHIFT; 2480 PANEL_LIGHT_ON_DELAY_SHIFT;
2324 2481
2325 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 2482 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
2326 PANEL_LIGHT_OFF_DELAY_SHIFT; 2483 PANEL_LIGHT_OFF_DELAY_SHIFT;
2327 2484
@@ -2354,11 +2511,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2354 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2511 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2355 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2512 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2356 2513
2357 intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay;
2358
2359 ironlake_edp_panel_vdd_on(intel_dp); 2514 ironlake_edp_panel_vdd_on(intel_dp);
2360 ret = intel_dp_get_dpcd(intel_dp); 2515 ret = intel_dp_get_dpcd(intel_dp);
2361 ironlake_edp_panel_vdd_off(intel_dp, false); 2516 ironlake_edp_panel_vdd_off(intel_dp, false);
2517
2362 if (ret) { 2518 if (ret) {
2363 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2519 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
2364 dev_priv->no_aux_handshake = 2520 dev_priv->no_aux_handshake =
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 23c56221fe8f..82a459bfccbc 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,7 @@
110/* drm_display_mode->private_flags */ 110/* drm_display_mode->private_flags */
111#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) 111#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
112#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) 112#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
113#define INTEL_MODE_DP_FORCE_6BPC (0x10)
113 114
114static inline void 115static inline void
115intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, 116intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 42f165a520de..e44191132ac4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
715 DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"), 715 DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
716 }, 716 },
717 }, 717 },
718 {
719 .callback = intel_no_lvds_dmi_callback,
720 .ident = "Asus AT5NM10T-I",
721 .matches = {
722 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
723 DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
724 },
725 },
718 726
719 { } /* terminating entry */ 727 { } /* terminating entry */
720}; 728};
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 499d4c0dbeeb..04d79fd1dc9d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
178 if (HAS_PCH_SPLIT(dev)) { 178 if (HAS_PCH_SPLIT(dev)) {
179 max >>= 16; 179 max >>= 16;
180 } else { 180 } else {
181 if (IS_PINEVIEW(dev)) { 181 if (INTEL_INFO(dev)->gen < 4)
182 max >>= 17; 182 max >>= 17;
183 } else { 183 else
184 max >>= 16; 184 max >>= 16;
185 if (INTEL_INFO(dev)->gen < 4)
186 max &= ~1;
187 }
188 185
189 if (is_backlight_combination_mode(dev)) 186 if (is_backlight_combination_mode(dev))
190 max *= 0xff; 187 max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
203 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 200 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
204 } else { 201 } else {
205 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 202 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
206 if (IS_PINEVIEW(dev)) 203 if (INTEL_INFO(dev)->gen < 4)
207 val >>= 1; 204 val >>= 1;
208 205
209 if (is_backlight_combination_mode(dev)) { 206 if (is_backlight_combination_mode(dev)) {
210 u8 lbpc; 207 u8 lbpc;
211 208
212 val &= ~1;
213 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); 209 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
214 val *= lbpc; 210 val *= lbpc;
215 } 211 }
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
246 } 242 }
247 243
248 tmp = I915_READ(BLC_PWM_CTL); 244 tmp = I915_READ(BLC_PWM_CTL);
249 if (IS_PINEVIEW(dev)) { 245 if (INTEL_INFO(dev)->gen < 4)
250 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
251 level <<= 1; 246 level <<= 1;
252 } else 247 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
253 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
254 I915_WRITE(BLC_PWM_CTL, tmp | level); 248 I915_WRITE(BLC_PWM_CTL, tmp | level);
255} 249}
256 250
@@ -326,7 +320,8 @@ static int intel_panel_update_status(struct backlight_device *bd)
326static int intel_panel_get_brightness(struct backlight_device *bd) 320static int intel_panel_get_brightness(struct backlight_device *bd)
327{ 321{
328 struct drm_device *dev = bl_get_data(bd); 322 struct drm_device *dev = bl_get_data(bd);
329 return intel_panel_get_backlight(dev); 323 struct drm_i915_private *dev_priv = dev->dev_private;
324 return dev_priv->backlight_level;
330} 325}
331 326
332static const struct backlight_ops intel_panel_bl_ops = { 327static const struct backlight_ops intel_panel_bl_ops = {
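The intel_panel.c hunks above fold the IS_PINEVIEW() special cases into a single gen < 4 check: on those parts the hardware stores the duty cycle shifted left by one bit, so writes shift the level up and reads shift it back down. A minimal standalone sketch of that read/write symmetry follows; the mask value and register layout here are assumptions for the example, not the driver's real BLC_PWM_CTL layout.

#include <stdint.h>
#include <stdio.h>

#define DUTY_CYCLE_MASK 0x0000ffffu   /* assumed field width for the sketch */

static uint32_t pack_level(uint32_t ctl, uint32_t level, int gen)
{
        if (gen < 4)
                level <<= 1;          /* older parts store the value doubled */
        return (ctl & ~DUTY_CYCLE_MASK) | level;
}

static uint32_t unpack_level(uint32_t ctl, int gen)
{
        uint32_t val = ctl & DUTY_CYCLE_MASK;
        return (gen < 4) ? val >> 1 : val;
}

int main(void)
{
        uint32_t ctl = 0xabcd0000u;   /* pretend the upper bits hold other config */

        ctl = pack_level(ctl, 100, 3);
        printf("stored 0x%08x, read back %u\n", ctl, unpack_level(ctl, 3));
        return 0;
}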
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3003fb25aefd..f7b9268df266 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -50,6 +50,7 @@
50#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) 50#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
51#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) 51#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
52#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) 52#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
53#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
53 54
54 55
55static const char *tv_format_names[] = { 56static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1086 } 1087 }
1087 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; 1088 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
1088 } 1089 }
1089 if (intel_crtc->pipe == 1) 1090
1090 sdvox |= SDVO_PIPE_B_SELECT; 1091 if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
1092 sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
1093 else
1094 sdvox |= TRANSCODER(intel_crtc->pipe);
1095
1091 if (intel_sdvo->has_hdmi_audio) 1096 if (intel_sdvo->has_hdmi_audio)
1092 sdvox |= SDVO_AUDIO_ENABLE; 1097 sdvox |= SDVO_AUDIO_ENABLE;
1093 1098
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
1314 return status; 1319 return status;
1315} 1320}
1316 1321
1322static bool
1323intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
1324 struct edid *edid)
1325{
1326 bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
1327 bool connector_is_digital = !!IS_DIGITAL(sdvo);
1328
1329 DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
1330 connector_is_digital, monitor_is_digital);
1331 return connector_is_digital == monitor_is_digital;
1332}
1333
1317static enum drm_connector_status 1334static enum drm_connector_status
1318intel_sdvo_detect(struct drm_connector *connector, bool force) 1335intel_sdvo_detect(struct drm_connector *connector, bool force)
1319{ 1336{
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1358 if (edid == NULL) 1375 if (edid == NULL)
1359 edid = intel_sdvo_get_analog_edid(connector); 1376 edid = intel_sdvo_get_analog_edid(connector);
1360 if (edid != NULL) { 1377 if (edid != NULL) {
1361 if (edid->input & DRM_EDID_INPUT_DIGITAL) 1378 if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
1362 ret = connector_status_disconnected; 1379 edid))
1363 else
1364 ret = connector_status_connected; 1380 ret = connector_status_connected;
1381 else
1382 ret = connector_status_disconnected;
1383
1365 connector->display_info.raw_edid = NULL; 1384 connector->display_info.raw_edid = NULL;
1366 kfree(edid); 1385 kfree(edid);
1367 } else 1386 } else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1402 edid = intel_sdvo_get_analog_edid(connector); 1421 edid = intel_sdvo_get_analog_edid(connector);
1403 1422
1404 if (edid != NULL) { 1423 if (edid != NULL) {
1405 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1424 if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
1406 bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); 1425 edid)) {
1407 bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
1408
1409 if (connector_is_digital == monitor_is_digital) {
1410 drm_mode_connector_update_edid_property(connector, edid); 1426 drm_mode_connector_update_edid_property(connector, edid);
1411 drm_add_edid_modes(connector, edid); 1427 drm_add_edid_modes(connector, edid);
1412 } 1428 }
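The new intel_sdvo_connector_matches_edid() helper above gates both detection and DDC mode listing on the connector and the monitor agreeing about digital vs. analog output. A self-contained sketch of that predicate, with made-up flag values standing in for the driver's SDVO_TMDS_MASK/SDVO_LVDS_MASK and DRM_EDID_INPUT_DIGITAL bits:

#include <stdbool.h>
#include <stdio.h>

#define OUT_TMDS            0x01   /* assumed flag bits, not the driver's values */
#define OUT_LVDS            0x02
#define EDID_INPUT_DIGITAL  0x80

static bool matches_edid(unsigned int output_flag, unsigned char edid_input)
{
        bool connector_is_digital = (output_flag & (OUT_TMDS | OUT_LVDS)) != 0;
        bool monitor_is_digital   = (edid_input & EDID_INPUT_DIGITAL) != 0;

        /* connected only when both sides are digital or both are analog */
        return connector_is_digital == monitor_is_digital;
}

int main(void)
{
        printf("digital connector, digital EDID: %d\n", matches_edid(OUT_TMDS, 0x80));
        printf("digital connector, analog EDID:  %d\n", matches_edid(OUT_TMDS, 0x00));
        return 0;
}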
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 032a82098136..5fc201b49d30 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -640,10 +640,9 @@ static int
640nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) 640nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
641{ 641{
642 struct drm_nouveau_private *dev_priv = dev->dev_private; 642 struct drm_nouveau_private *dev_priv = dev->dev_private;
643 uint32_t reg0 = nv_rd32(dev, reg + 0);
644 uint32_t reg1 = nv_rd32(dev, reg + 4);
645 struct nouveau_pll_vals pll; 643 struct nouveau_pll_vals pll;
646 struct pll_lims pll_limits; 644 struct pll_lims pll_limits;
645 u32 ctrl, mask, coef;
647 int ret; 646 int ret;
648 647
649 ret = get_pll_limits(dev, reg, &pll_limits); 648 ret = get_pll_limits(dev, reg, &pll_limits);
@@ -654,15 +653,20 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
654 if (!clk) 653 if (!clk)
655 return -ERANGE; 654 return -ERANGE;
656 655
657 reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16); 656 coef = pll.N1 << 8 | pll.M1;
658 reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1; 657 ctrl = pll.log2P << 16;
659 658 mask = 0x00070000;
660 if (dev_priv->vbios.execute) { 659 if (reg == 0x004008) {
661 still_alive(); 660 mask |= 0x01f80000;
662 nv_wr32(dev, reg + 4, reg1); 661 ctrl |= (pll_limits.log2p_bias << 19);
663 nv_wr32(dev, reg + 0, reg0); 662 ctrl |= (pll.log2P << 22);
664 } 663 }
665 664
665 if (!dev_priv->vbios.execute)
666 return 0;
667
668 nv_mask(dev, reg + 0, mask, ctrl);
669 nv_wr32(dev, reg + 4, coef);
666 return 0; 670 return 0;
667} 671}
668 672
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b1b33a108b31..f12dd0f39211 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -153,7 +153,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
153 153
154 if (dev_priv->card_type == NV_10 && 154 if (dev_priv->card_type == NV_10 &&
155 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && 155 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
156 nvbo->bo.mem.num_pages < vram_pages / 2) { 156 nvbo->bo.mem.num_pages < vram_pages / 4) {
157 /* 157 /*
158 * Make sure that the color and depth buffers are handled 158 * Make sure that the color and depth buffers are handled
159 * by independent memory controller units. Up to a 9x 159 * by independent memory controller units. Up to a 9x
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index a319d5646ea9..bb6ec9ef8676 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -158,6 +158,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
158 INIT_LIST_HEAD(&chan->nvsw.vbl_wait); 158 INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
159 INIT_LIST_HEAD(&chan->nvsw.flip); 159 INIT_LIST_HEAD(&chan->nvsw.flip);
160 INIT_LIST_HEAD(&chan->fence.pending); 160 INIT_LIST_HEAD(&chan->fence.pending);
161 spin_lock_init(&chan->fence.lock);
161 162
162 /* setup channel's memory and vm */ 163 /* setup channel's memory and vm */
163 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); 164 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index e0d275e1c96c..cea6696b1906 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -710,7 +710,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
710 case OUTPUT_DP: 710 case OUTPUT_DP:
711 max_clock = nv_encoder->dp.link_nr; 711 max_clock = nv_encoder->dp.link_nr;
712 max_clock *= nv_encoder->dp.link_bw; 712 max_clock *= nv_encoder->dp.link_bw;
713 clock = clock * nouveau_connector_bpp(connector) / 8; 713 clock = clock * nouveau_connector_bpp(connector) / 10;
714 break; 714 break;
715 default: 715 default:
716 BUG_ON(1); 716 BUG_ON(1);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2531ef54c3e9..7e88cd7f2b99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
369 spin_unlock_irqrestore(&dev->event_lock, flags); 369 spin_unlock_irqrestore(&dev->event_lock, flags);
370 return 0; 370 return 0;
371} 371}
372
373int
374nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
375 struct drm_mode_create_dumb *args)
376{
377 struct nouveau_bo *bo;
378 int ret;
379
380 args->pitch = roundup(args->width * (args->bpp / 8), 256);
381 args->size = args->pitch * args->height;
382 args->size = roundup(args->size, PAGE_SIZE);
383
384 ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
385 if (ret)
386 return ret;
387
388 ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
389 drm_gem_object_unreference_unlocked(bo->gem);
390 return ret;
391}
392
393int
394nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
395 uint32_t handle)
396{
397 return drm_gem_handle_delete(file_priv, handle);
398}
399
400int
401nouveau_display_dumb_map_offset(struct drm_file *file_priv,
402 struct drm_device *dev,
403 uint32_t handle, uint64_t *poffset)
404{
405 struct drm_gem_object *gem;
406
407 gem = drm_gem_object_lookup(dev, file_priv, handle);
408 if (gem) {
409 struct nouveau_bo *bo = gem->driver_private;
410 *poffset = bo->bo.addr_space_offset;
411 drm_gem_object_unreference_unlocked(gem);
412 return 0;
413 }
414
415 return -ENOENT;
416}
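nouveau_display_dumb_create() above derives the allocation purely from the requested geometry: the pitch is the byte width rounded up to 256 bytes and the total size is padded to whole pages. A standalone restatement of that arithmetic; the 4096-byte PAGE_SIZE is an assumption for the example.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static uint32_t roundup_u32(uint32_t x, uint32_t align)
{
        return ((x + align - 1) / align) * align;
}

int main(void)
{
        uint32_t width = 1366, height = 768, bpp = 32;
        uint32_t pitch = roundup_u32(width * (bpp / 8), 256);
        uint32_t size  = roundup_u32(pitch * height, PAGE_SIZE);

        printf("pitch=%u bytes, size=%u bytes\n", pitch, size);
        return 0;
}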
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index d661bc5e3945..f0a60afac446 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -434,6 +434,10 @@ static struct drm_driver driver = {
434 .gem_open_object = nouveau_gem_object_open, 434 .gem_open_object = nouveau_gem_object_open,
435 .gem_close_object = nouveau_gem_object_close, 435 .gem_close_object = nouveau_gem_object_close,
436 436
437 .dumb_create = nouveau_display_dumb_create,
438 .dumb_map_offset = nouveau_display_dumb_map_offset,
439 .dumb_destroy = nouveau_display_dumb_destroy,
440
437 .name = DRIVER_NAME, 441 .name = DRIVER_NAME,
438 .desc = DRIVER_DESC, 442 .desc = DRIVER_DESC,
439#ifdef GIT_REVISION 443#ifdef GIT_REVISION
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 0c53e39fc6c9..dfddb7e078a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1421,6 +1421,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1421 struct drm_pending_vblank_event *event); 1421 struct drm_pending_vblank_event *event);
1422int nouveau_finish_page_flip(struct nouveau_channel *, 1422int nouveau_finish_page_flip(struct nouveau_channel *,
1423 struct nouveau_page_flip_state *); 1423 struct nouveau_page_flip_state *);
1424int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
1425 struct drm_mode_create_dumb *args);
1426int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
1427 uint32_t handle, uint64_t *offset);
1428int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
1429 uint32_t handle);
1424 1430
1425/* nv10_gpio.c */ 1431/* nv10_gpio.c */
1426int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); 1432int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index defffd140781..dbb151834121 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -488,6 +488,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
488{ 488{
489 struct drm_nouveau_private *dev_priv = dev->dev_private; 489 struct drm_nouveau_private *dev_priv = dev->dev_private;
490 struct nouveau_fbdev *nfbdev; 490 struct nouveau_fbdev *nfbdev;
491 int preferred_bpp;
491 int ret; 492 int ret;
492 493
493 nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); 494 nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
@@ -506,7 +507,15 @@ int nouveau_fbcon_init(struct drm_device *dev)
506 } 507 }
507 508
508 drm_fb_helper_single_add_all_connectors(&nfbdev->helper); 509 drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
509 drm_fb_helper_initial_config(&nfbdev->helper, 32); 510
511 if (dev_priv->vram_size <= 32 * 1024 * 1024)
512 preferred_bpp = 8;
513 else if (dev_priv->vram_size <= 64 * 1024 * 1024)
514 preferred_bpp = 16;
515 else
516 preferred_bpp = 32;
517
518 drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp);
510 return 0; 519 return 0;
511} 520}
512 521
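The nouveau_fbcon.c hunk above picks the console depth from the amount of VRAM so cards with little memory are not forced to hold a 32bpp framebuffer. Restated as a small standalone function with the same thresholds:

#include <stdint.h>
#include <stdio.h>

static int preferred_bpp(uint64_t vram_size)
{
        if (vram_size <= 32ull * 1024 * 1024)
                return 8;
        if (vram_size <= 64ull * 1024 * 1024)
                return 16;
        return 32;
}

int main(void)
{
        printf("16 MiB  -> %d bpp\n", preferred_bpp(16ull << 20));
        printf("48 MiB  -> %d bpp\n", preferred_bpp(48ull << 20));
        printf("256 MiB -> %d bpp\n", preferred_bpp(256ull << 20));
        return 0;
}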
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 81116cfea275..2f6daae68b9d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -539,8 +539,6 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
539 return ret; 539 return ret;
540 } 540 }
541 541
542 INIT_LIST_HEAD(&chan->fence.pending);
543 spin_lock_init(&chan->fence.lock);
544 atomic_set(&chan->fence.last_sequence_irq, 0); 542 atomic_set(&chan->fence.last_sequence_irq, 0);
545 return 0; 543 return 0;
546} 544}
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index c6143df48b9f..d39b2202b197 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -333,7 +333,7 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
333 333
334 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); 334 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
335 335
336 for (i = 0; info[i].addr; i++) { 336 for (i = 0; i2c && info[i].addr; i++) {
337 if (nouveau_probe_i2c_addr(i2c, info[i].addr) && 337 if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
338 (!match || match(i2c, &info[i]))) { 338 (!match || match(i2c, &info[i]))) {
339 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); 339 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 02222c540aee..960c0ae0c0c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
680 return ret; 680 return ret;
681 } 681 }
682 682
683 ret = drm_mm_init(&chan->ramin_heap, base, size); 683 ret = drm_mm_init(&chan->ramin_heap, base, size - base);
684 if (ret) { 684 if (ret) {
685 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); 685 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
686 nouveau_gpuobj_ref(NULL, &chan->ramin); 686 nouveau_gpuobj_ref(NULL, &chan->ramin);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 9f178aa94162..33d03fbf00df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -239,7 +239,7 @@ nouveau_perf_init(struct drm_device *dev)
239 if(version == 0x15) { 239 if(version == 0x15) {
240 memtimings->timing = 240 memtimings->timing =
241 kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); 241 kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
242 if(!memtimings) { 242 if (!memtimings->timing) {
243 NV_WARN(dev,"Could not allocate memtiming table\n"); 243 NV_WARN(dev,"Could not allocate memtiming table\n");
244 return; 244 return;
245 } 245 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 82478e0998e5..d8831ab42bb9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -579,6 +579,14 @@ nouveau_card_init(struct drm_device *dev)
579 if (ret) 579 if (ret)
580 goto out_display_early; 580 goto out_display_early;
581 581
582 /* workaround an odd issue on nvc1 by disabling the device's
583 * nosnoop capability. hopefully won't cause issues until a
584 * better fix is found - assuming there is one...
585 */
586 if (dev_priv->chipset == 0xc1) {
587 nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
588 }
589
582 nouveau_pm_init(dev); 590 nouveau_pm_init(dev);
583 591
584 ret = engine->vram.init(dev); 592 ret = engine->vram.init(dev);
@@ -1102,12 +1110,13 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
1102 dev_priv->noaccel = !!nouveau_noaccel; 1110 dev_priv->noaccel = !!nouveau_noaccel;
1103 if (nouveau_noaccel == -1) { 1111 if (nouveau_noaccel == -1) {
1104 switch (dev_priv->chipset) { 1112 switch (dev_priv->chipset) {
1105 case 0xc1: /* known broken */ 1113#if 0
1106 case 0xc8: /* never tested */ 1114 case 0xXX: /* known broken */
1107 NV_INFO(dev, "acceleration disabled by default, pass " 1115 NV_INFO(dev, "acceleration disabled by default, pass "
1108 "noaccel=0 to force enable\n"); 1116 "noaccel=0 to force enable\n");
1109 dev_priv->noaccel = true; 1117 dev_priv->noaccel = true;
1110 break; 1118 break;
1119#endif
1111 default: 1120 default:
1112 dev_priv->noaccel = false; 1121 dev_priv->noaccel = false;
1113 break; 1122 break;
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index bbc0b9c7e1f7..e676b0d53478 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -57,12 +57,14 @@ read_pll_2(struct drm_device *dev, u32 reg)
57 int P = (ctrl & 0x00070000) >> 16; 57 int P = (ctrl & 0x00070000) >> 16;
58 u32 ref = 27000, clk = 0; 58 u32 ref = 27000, clk = 0;
59 59
60 if (ctrl & 0x80000000) 60 if ((ctrl & 0x80000000) && M1) {
61 clk = ref * N1 / M1; 61 clk = ref * N1 / M1;
62 62 if ((ctrl & 0x40000100) == 0x40000000) {
63 if (!(ctrl & 0x00000100)) { 63 if (M2)
64 if (ctrl & 0x40000000) 64 clk = clk * N2 / M2;
65 clk = clk * N2 / M2; 65 else
66 clk = 0;
67 }
66 } 68 }
67 69
68 return clk >> P; 70 return clk >> P;
@@ -177,6 +179,11 @@ nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
177 } 179 }
178 180
179 /* memory clock */ 181 /* memory clock */
182 if (!perflvl->memory) {
183 info->mpll_ctrl = 0x00000000;
184 goto out;
185 }
186
180 ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory, 187 ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
181 &N1, &M1, &N2, &M2, &log2P); 188 &N1, &M1, &N2, &M2, &log2P);
182 if (ret < 0) 189 if (ret < 0)
@@ -264,6 +271,9 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
264 mdelay(5); 271 mdelay(5);
265 nv_mask(dev, 0x00c040, 0x00000333, info->ctrl); 272 nv_mask(dev, 0x00c040, 0x00000333, info->ctrl);
266 273
274 if (!info->mpll_ctrl)
275 goto resume;
276
267 /* wait for vblank start on active crtcs, disable memory access */ 277 /* wait for vblank start on active crtcs, disable memory access */
268 for (i = 0; i < 2; i++) { 278 for (i = 0; i < 2; i++) {
269 if (!(crtc_mask & (1 << i))) 279 if (!(crtc_mask & (1 << i)))
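The reworked read_pll_2() above only trusts the readback when the enable bit is set and the divisors are non-zero, so an unprogrammed PLL reports 0 instead of dividing by zero. A sketch of that guarded two-stage calculation; the coefficient-field layout used here is an assumption inferred from the shifts visible in the hunk, not a verified register map.

#include <stdint.h>
#include <stdio.h>

static uint32_t read_pll_2(uint32_t ctrl, uint32_t coef, uint32_t ref_khz)
{
        uint32_t N2 = (coef & 0xff000000) >> 24;
        uint32_t M2 = (coef & 0x00ff0000) >> 16;
        uint32_t N1 = (coef & 0x0000ff00) >> 8;
        uint32_t M1 = (coef & 0x000000ff);
        uint32_t P  = (ctrl & 0x00070000) >> 16;
        uint32_t clk = 0;

        if ((ctrl & 0x80000000) && M1) {
                clk = ref_khz * N1 / M1;
                /* second stage only when it is both enabled and selected */
                if ((ctrl & 0x40000100) == 0x40000000)
                        clk = M2 ? clk * N2 / M2 : 0;
        }
        return clk >> P;
}

int main(void)
{
        /* enabled PLL, second stage active, post-divider P = 1 */
        printf("%u kHz\n", (unsigned)read_pll_2(0xc0010000u, 0x04026f03u, 27000));
        return 0;
}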
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index d23ca00e7d62..06de250fe617 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
616 struct drm_nouveau_private *dev_priv = dev->dev_private; 616 struct drm_nouveau_private *dev_priv = dev->dev_private;
617 struct nv50_display *disp = nv50_display(dev); 617 struct nv50_display *disp = nv50_display(dev);
618 u32 unk30 = nv_rd32(dev, 0x610030), mc; 618 u32 unk30 = nv_rd32(dev, 0x610030), mc;
619 int i, crtc, or, type = OUTPUT_ANY; 619 int i, crtc, or = 0, type = OUTPUT_ANY;
620 620
621 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); 621 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
622 disp->irq.dcb = NULL; 622 disp->irq.dcb = NULL;
@@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
708 struct nv50_display *disp = nv50_display(dev); 708 struct nv50_display *disp = nv50_display(dev);
709 u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0; 709 u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
710 struct dcb_entry *dcb; 710 struct dcb_entry *dcb;
711 int i, crtc, or, type = OUTPUT_ANY; 711 int i, crtc, or = 0, type = OUTPUT_ANY;
712 712
713 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); 713 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
714 dcb = disp->irq.dcb; 714 dcb = disp->irq.dcb;
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8c979b31ff61..ac601f7c4e1a 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -131,8 +131,8 @@ nv50_graph_init(struct drm_device *dev, int engine)
131 NV_DEBUG(dev, "\n"); 131 NV_DEBUG(dev, "\n");
132 132
133 /* master reset */ 133 /* master reset */
134 nv_mask(dev, 0x000200, 0x00200100, 0x00000000); 134 nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
135 nv_mask(dev, 0x000200, 0x00200100, 0x00200100); 135 nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
136 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ 136 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
137 137
138 /* reset/enable traps and interrupts */ 138 /* reset/enable traps and interrupts */
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index d05c2c3b2444..4b46d6968566 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -601,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
601 gr_def(ctx, offset + 0x1c, 0x00880000); 601 gr_def(ctx, offset + 0x1c, 0x00880000);
602 break; 602 break;
603 case 0x86: 603 case 0x86:
604 gr_def(ctx, offset + 0x1c, 0x008c0000); 604 gr_def(ctx, offset + 0x1c, 0x018c0000);
605 break; 605 break;
606 case 0x92: 606 case 0x92:
607 case 0x96: 607 case 0x96:
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 9da23838e63e..2e45e57fd869 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -160,7 +160,7 @@ nv50_vram_rblock(struct drm_device *dev)
160 colbits = (r4 & 0x0000f000) >> 12; 160 colbits = (r4 & 0x0000f000) >> 12;
161 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; 161 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
162 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; 162 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
163 banks = ((r4 & 0x01000000) ? 8 : 4); 163 banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
164 164
165 rowsize = parts * banks * (1 << colbits) * 8; 165 rowsize = parts * banks * (1 << colbits) * 8;
166 predicted = rowsize << rowbitsa; 166 predicted = rowsize << rowbitsa;
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index bbdbc51830c8..ecfafd70cf0e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -157,8 +157,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
157 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); 157 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
158 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; 158 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
159 struct drm_device *dev = chan->dev; 159 struct drm_device *dev = chan->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
160 int i = 0, gpc, tp, ret; 161 int i = 0, gpc, tp, ret;
161 u32 magic;
162 162
163 ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM, 163 ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
164 &grch->unk408004); 164 &grch->unk408004);
@@ -207,14 +207,37 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
207 nv_wo32(grch->mmio, i++ * 4, 0x0041880c); 207 nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
208 nv_wo32(grch->mmio, i++ * 4, 0x80000018); 208 nv_wo32(grch->mmio, i++ * 4, 0x80000018);
209 209
210 magic = 0x02180000; 210 if (dev_priv->chipset != 0xc1) {
211 nv_wo32(grch->mmio, i++ * 4, 0x00405830); 211 u32 magic = 0x02180000;
212 nv_wo32(grch->mmio, i++ * 4, magic); 212 nv_wo32(grch->mmio, i++ * 4, 0x00405830);
213 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 213 nv_wo32(grch->mmio, i++ * 4, magic);
214 for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) { 214 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
215 u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); 215 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
216 nv_wo32(grch->mmio, i++ * 4, reg); 216 u32 reg = TP_UNIT(gpc, tp, 0x520);
217 nv_wo32(grch->mmio, i++ * 4, magic); 217 nv_wo32(grch->mmio, i++ * 4, reg);
218 nv_wo32(grch->mmio, i++ * 4, magic);
219 magic += 0x0324;
220 }
221 }
222 } else {
223 u32 magic = 0x02180000;
224 nv_wo32(grch->mmio, i++ * 4, 0x00405830);
225 nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218);
226 nv_wo32(grch->mmio, i++ * 4, 0x004064c4);
227 nv_wo32(grch->mmio, i++ * 4, 0x0086ffff);
228 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
229 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
230 u32 reg = TP_UNIT(gpc, tp, 0x520);
231 nv_wo32(grch->mmio, i++ * 4, reg);
232 nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic);
233 magic += 0x0324;
234 }
235 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
236 u32 reg = TP_UNIT(gpc, tp, 0x544);
237 nv_wo32(grch->mmio, i++ * 4, reg);
238 nv_wo32(grch->mmio, i++ * 4, magic);
239 magic += 0x0324;
240 }
218 } 241 }
219 } 242 }
220 243
@@ -358,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
358 u8 tpnr[GPC_MAX]; 381 u8 tpnr[GPC_MAX];
359 int i, gpc, tpc; 382 int i, gpc, tpc;
360 383
384 nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
385
361 /* 386 /*
362 * TP ROP UNKVAL(magic_not_rop_nr) 387 * TP ROP UNKVAL(magic_not_rop_nr)
363 * 450: 4/0/0/0 2 3 388 * 450: 4/0/0/0 2 3
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index dd0e6a736b3b..96b0b93d94ca 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1812,6 +1812,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1812 /* calculate first set of magics */ 1812 /* calculate first set of magics */
1813 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 1813 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1814 1814
1815 gpc = -1;
1815 for (tp = 0; tp < priv->tp_total; tp++) { 1816 for (tp = 0; tp < priv->tp_total; tp++) {
1816 do { 1817 do {
1817 gpc = (gpc + 1) % priv->gpc_nr; 1818 gpc = (gpc + 1) % priv->gpc_nr;
@@ -1861,30 +1862,26 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1861 1862
1862 if (1) { 1863 if (1) {
1863 u32 tp_mask = 0, tp_set = 0; 1864 u32 tp_mask = 0, tp_set = 0;
1864 u8 tpnr[GPC_MAX]; 1865 u8 tpnr[GPC_MAX], a, b;
1865 1866
1866 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 1867 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1867 for (gpc = 0; gpc < priv->gpc_nr; gpc++) 1868 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
1868 tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8); 1869 tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
1869 1870
1870 gpc = -1; 1871 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
1871 for (i = 0, gpc = -1; i < 32; i++) { 1872 a = (i * (priv->tp_total - 1)) / 32;
1872 int ltp = i * (priv->tp_total - 1) / 32; 1873 if (a != b) {
1873 1874 b = a;
1874 do { 1875 do {
1875 gpc = (gpc + 1) % priv->gpc_nr; 1876 gpc = (gpc + 1) % priv->gpc_nr;
1876 } while (!tpnr[gpc]); 1877 } while (!tpnr[gpc]);
1877 tp = priv->tp_nr[gpc] - tpnr[gpc]--; 1878 tp = priv->tp_nr[gpc] - tpnr[gpc]--;
1878 1879
1879 tp_set |= 1 << ((gpc * 8) + tp); 1880 tp_set |= 1 << ((gpc * 8) + tp);
1881 }
1880 1882
1881 do { 1883 nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
1882 nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); 1884 nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask);
1883 tp_set ^= tp_mask;
1884 nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
1885 tp_set ^= tp_mask;
1886 } while (ltp == (++i * (priv->tp_total - 1) / 32));
1887 i--;
1888 } 1885 }
1889 } 1886 }
1890 1887
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index edbfe9360ae2..ce984d573a51 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -43,7 +43,7 @@ static const u8 types[256] = {
43 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
44 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0, 44 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
45 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 45 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
46 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 46 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
47 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3, 47 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
48 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, 48 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
49 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0 49 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
@@ -110,22 +110,26 @@ nvc0_vram_init(struct drm_device *dev)
110 u32 bsize = nv_rd32(dev, 0x10f20c); 110 u32 bsize = nv_rd32(dev, 0x10f20c);
111 u32 offset, length; 111 u32 offset, length;
112 bool uniform = true; 112 bool uniform = true;
113 int ret, i; 113 int ret, part;
114 114
115 NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); 115 NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
116 NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize); 116 NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize);
117 117
118 /* read amount of vram attached to each memory controller */ 118 /* read amount of vram attached to each memory controller */
119 for (i = 0; i < parts; i++) { 119 part = 0;
120 u32 psize = nv_rd32(dev, 0x11020c + (i * 0x1000)); 120 while (parts) {
121 u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000));
122 if (psize == 0)
123 continue;
124 parts--;
125
121 if (psize != bsize) { 126 if (psize != bsize) {
122 if (psize < bsize) 127 if (psize < bsize)
123 bsize = psize; 128 bsize = psize;
124 uniform = false; 129 uniform = false;
125 } 130 }
126 131
127 NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", i, psize); 132 NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
128
129 dev_priv->vram_size += (u64)psize << 20; 133 dev_priv->vram_size += (u64)psize << 20;
130 } 134 }
131 135
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 23d63b4b3d77..cb006a718e70 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
780 continue; 780 continue;
781 781
782 if (nv_partner != nv_encoder && 782 if (nv_partner != nv_encoder &&
783 nv_partner->dcb->or == nv_encoder->or) { 783 nv_partner->dcb->or == nv_encoder->dcb->or) {
784 if (nv_partner->last_dpms == DRM_MODE_DPMS_ON) 784 if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
785 return; 785 return;
786 break; 786 break;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 7567ff2510e0..457bbad3cbf9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1107 return -EINVAL; 1107 return -EINVAL;
1108 } 1108 }
1109 1109
1110 if (tiling_flags & RADEON_TILING_MACRO) 1110 if (tiling_flags & RADEON_TILING_MACRO) {
1111 if (rdev->family >= CHIP_CAYMAN)
1112 tmp = rdev->config.cayman.tile_config;
1113 else
1114 tmp = rdev->config.evergreen.tile_config;
1115
1116 switch ((tmp & 0xf0) >> 4) {
1117 case 0: /* 4 banks */
1118 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
1119 break;
1120 case 1: /* 8 banks */
1121 default:
1122 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
1123 break;
1124 case 2: /* 16 banks */
1125 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
1126 break;
1127 }
1128
1129 switch ((tmp & 0xf000) >> 12) {
1130 case 0: /* 1KB rows */
1131 default:
1132 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
1133 break;
1134 case 1: /* 2KB rows */
1135 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
1136 break;
1137 case 2: /* 4KB rows */
1138 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
1139 break;
1140 }
1141
1111 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); 1142 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
1112 else if (tiling_flags & RADEON_TILING_MICRO) 1143 } else if (tiling_flags & RADEON_TILING_MICRO)
1113 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1144 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1114 1145
1115 switch (radeon_crtc->crtc_id) { 1146 switch (radeon_crtc->crtc_id) {
@@ -1522,12 +1553,6 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
1522 struct drm_display_mode *mode, 1553 struct drm_display_mode *mode,
1523 struct drm_display_mode *adjusted_mode) 1554 struct drm_display_mode *adjusted_mode)
1524{ 1555{
1525 struct drm_device *dev = crtc->dev;
1526 struct radeon_device *rdev = dev->dev_private;
1527
1528 /* adjust pm to upcoming mode change */
1529 radeon_pm_compute_clocks(rdev);
1530
1531 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 1556 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
1532 return false; 1557 return false;
1533 return true; 1558 return true;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index a0de48542f71..6fb335a4fdda 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -283,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
283 } 283 }
284 } 284 }
285 285
286 DRM_ERROR("aux i2c too many retries, giving up\n"); 286 DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
287 return -EREMOTEIO; 287 return -EREMOTEIO;
288} 288}
289 289
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e4c384b9511c..5e00d1670aa9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
82{ 82{
83 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 83 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
84 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); 84 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
85 int i;
85 86
86 /* Lock the graphics update lock */ 87 /* Lock the graphics update lock */
87 tmp |= EVERGREEN_GRPH_UPDATE_LOCK; 88 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
99 (u32)crtc_base); 100 (u32)crtc_base);
100 101
101 /* Wait for update_pending to go high. */ 102 /* Wait for update_pending to go high. */
102 while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)); 103 for (i = 0; i < rdev->usec_timeout; i++) {
104 if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
105 break;
106 udelay(1);
107 }
103 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 108 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
104 109
105 /* Unlock the lock, so double-buffering can take place inside vblank */ 110 /* Unlock the lock, so double-buffering can take place inside vblank */
@@ -157,6 +162,57 @@ int sumo_get_temp(struct radeon_device *rdev)
157 return actual_temp * 1000; 162 return actual_temp * 1000;
158} 163}
159 164
165void sumo_pm_init_profile(struct radeon_device *rdev)
166{
167 int idx;
168
169 /* default */
170 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
171 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
172 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
173 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
174
175 /* low,mid sh/mh */
176 if (rdev->flags & RADEON_IS_MOBILITY)
177 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
178 else
179 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
180
181 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
182 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
183 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
184 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
185
186 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
187 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
188 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
189 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
190
191 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
192 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
193 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
194 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
195
196 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
197 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
198 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
199 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
200
201 /* high sh/mh */
202 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
203 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
204 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
205 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
206 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
207 rdev->pm.power_state[idx].num_clock_modes - 1;
208
209 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
210 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
211 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
212 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
213 rdev->pm.power_state[idx].num_clock_modes - 1;
214}
215
160void evergreen_pm_misc(struct radeon_device *rdev) 216void evergreen_pm_misc(struct radeon_device *rdev)
161{ 217{
162 int req_ps_idx = rdev->pm.requested_power_state_index; 218 int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -1219,7 +1275,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
1219 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 1275 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1220 rdev->mc.vram_end >> 12); 1276 rdev->mc.vram_end >> 12);
1221 } 1277 }
1222 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 1278 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
1223 if (rdev->flags & RADEON_IS_IGP) { 1279 if (rdev->flags & RADEON_IS_IGP) {
1224 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; 1280 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
1225 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; 1281 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
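Both the evergreen.c page-flip hunk above and the r100.c hunk further down replace an unbounded while (!(RREG32(...) & ...PENDING)); busy-wait with a poll bounded by usec_timeout. A generic sketch of that pattern with a stand-in register read; the fake read and timeout value are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define USEC_TIMEOUT 100000

static uint32_t fake_mmio_read(int elapsed_us)
{
        /* pretend the pending bit appears after 25 us */
        return elapsed_us >= 25 ? 0x1u : 0x0u;
}

static bool wait_for_pending(void)
{
        for (int i = 0; i < USEC_TIMEOUT; i++) {
                if (fake_mmio_read(i) & 0x1u)
                        return true;
                /* the driver calls udelay(1) here */
        }
        return false;   /* time out instead of hanging the CPU */
}

int main(void)
{
        printf("update pending seen: %s\n", wait_for_pending() ? "yes" : "no");
        return 0;
}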
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 7fdfa8ea7570..cd4590aae154 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
38 u32 group_size; 38 u32 group_size;
39 u32 nbanks; 39 u32 nbanks;
40 u32 npipes; 40 u32 npipes;
41 u32 row_size;
41 /* value we track */ 42 /* value we track */
42 u32 nsamples; 43 u32 nsamples;
43 u32 cb_color_base_last[12]; 44 u32 cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
77 struct radeon_bo *db_s_write_bo; 78 struct radeon_bo *db_s_write_bo;
78}; 79};
79 80
81static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
82{
83 if (tiling_flags & RADEON_TILING_MACRO)
84 return ARRAY_2D_TILED_THIN1;
85 else if (tiling_flags & RADEON_TILING_MICRO)
86 return ARRAY_1D_TILED_THIN1;
87 else
88 return ARRAY_LINEAR_GENERAL;
89}
90
91static u32 evergreen_cs_get_num_banks(u32 nbanks)
92{
93 switch (nbanks) {
94 case 2:
95 return ADDR_SURF_2_BANK;
96 case 4:
97 return ADDR_SURF_4_BANK;
98 case 8:
99 default:
100 return ADDR_SURF_8_BANK;
101 case 16:
102 return ADDR_SURF_16_BANK;
103 }
104}
105
106static u32 evergreen_cs_get_tile_split(u32 row_size)
107{
108 switch (row_size) {
109 case 1:
110 default:
111 return ADDR_SURF_TILE_SPLIT_1KB;
112 case 2:
113 return ADDR_SURF_TILE_SPLIT_2KB;
114 case 4:
115 return ADDR_SURF_TILE_SPLIT_4KB;
116 }
117}
118
80static void evergreen_cs_track_init(struct evergreen_cs_track *track) 119static void evergreen_cs_track_init(struct evergreen_cs_track *track)
81{ 120{
82 int i; 121 int i;
@@ -480,21 +519,22 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
480 } 519 }
481 break; 520 break;
482 case DB_Z_INFO: 521 case DB_Z_INFO:
483 r = evergreen_cs_packet_next_reloc(p, &reloc);
484 if (r) {
485 dev_warn(p->dev, "bad SET_CONTEXT_REG "
486 "0x%04X\n", reg);
487 return -EINVAL;
488 }
489 track->db_z_info = radeon_get_ib_value(p, idx); 522 track->db_z_info = radeon_get_ib_value(p, idx);
490 ib[idx] &= ~Z_ARRAY_MODE(0xf); 523 if (!p->keep_tiling_flags) {
491 track->db_z_info &= ~Z_ARRAY_MODE(0xf); 524 r = evergreen_cs_packet_next_reloc(p, &reloc);
492 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 525 if (r) {
493 ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 526 dev_warn(p->dev, "bad SET_CONTEXT_REG "
494 track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 527 "0x%04X\n", reg);
495 } else { 528 return -EINVAL;
496 ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 529 }
497 track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 530 ib[idx] &= ~Z_ARRAY_MODE(0xf);
531 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
532 ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
533 track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
534 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
535 ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
536 ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
537 }
498 } 538 }
499 break; 539 break;
500 case DB_STENCIL_INFO: 540 case DB_STENCIL_INFO:
@@ -607,40 +647,34 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
607 case CB_COLOR5_INFO: 647 case CB_COLOR5_INFO:
608 case CB_COLOR6_INFO: 648 case CB_COLOR6_INFO:
609 case CB_COLOR7_INFO: 649 case CB_COLOR7_INFO:
610 r = evergreen_cs_packet_next_reloc(p, &reloc);
611 if (r) {
612 dev_warn(p->dev, "bad SET_CONTEXT_REG "
613 "0x%04X\n", reg);
614 return -EINVAL;
615 }
616 tmp = (reg - CB_COLOR0_INFO) / 0x3c; 650 tmp = (reg - CB_COLOR0_INFO) / 0x3c;
617 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 651 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
618 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 652 if (!p->keep_tiling_flags) {
619 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 653 r = evergreen_cs_packet_next_reloc(p, &reloc);
620 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 654 if (r) {
621 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 655 dev_warn(p->dev, "bad SET_CONTEXT_REG "
622 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 656 "0x%04X\n", reg);
623 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 657 return -EINVAL;
658 }
659 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
660 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
624 } 661 }
625 break; 662 break;
626 case CB_COLOR8_INFO: 663 case CB_COLOR8_INFO:
627 case CB_COLOR9_INFO: 664 case CB_COLOR9_INFO:
628 case CB_COLOR10_INFO: 665 case CB_COLOR10_INFO:
629 case CB_COLOR11_INFO: 666 case CB_COLOR11_INFO:
630 r = evergreen_cs_packet_next_reloc(p, &reloc);
631 if (r) {
632 dev_warn(p->dev, "bad SET_CONTEXT_REG "
633 "0x%04X\n", reg);
634 return -EINVAL;
635 }
636 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; 667 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
637 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 668 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
638 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 669 if (!p->keep_tiling_flags) {
639 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 670 r = evergreen_cs_packet_next_reloc(p, &reloc);
640 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 671 if (r) {
641 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 672 dev_warn(p->dev, "bad SET_CONTEXT_REG "
642 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 673 "0x%04X\n", reg);
643 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 674 return -EINVAL;
675 }
676 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
677 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
644 } 678 }
645 break; 679 break;
646 case CB_COLOR0_PITCH: 680 case CB_COLOR0_PITCH:
@@ -695,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
695 case CB_COLOR9_ATTRIB: 729 case CB_COLOR9_ATTRIB:
696 case CB_COLOR10_ATTRIB: 730 case CB_COLOR10_ATTRIB:
697 case CB_COLOR11_ATTRIB: 731 case CB_COLOR11_ATTRIB:
732 r = evergreen_cs_packet_next_reloc(p, &reloc);
733 if (r) {
734 dev_warn(p->dev, "bad SET_CONTEXT_REG "
735 "0x%04X\n", reg);
736 return -EINVAL;
737 }
738 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
739 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
740 ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
741 }
698 break; 742 break;
699 case CB_COLOR0_DIM: 743 case CB_COLOR0_DIM:
700 case CB_COLOR1_DIM: 744 case CB_COLOR1_DIM:
@@ -1311,10 +1355,16 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
1311 return -EINVAL; 1355 return -EINVAL;
1312 } 1356 }
1313 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1357 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1314 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1358 if (!p->keep_tiling_flags) {
1315 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 1359 ib[idx+1+(i*8)+1] |=
1316 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1360 TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1317 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 1361 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1362 ib[idx+1+(i*8)+6] |=
1363 TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
1364 ib[idx+1+(i*8)+7] |=
1365 TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1366 }
1367 }
1318 texture = reloc->robj; 1368 texture = reloc->robj;
1319 /* tex mip base */ 1369 /* tex mip base */
1320 r = evergreen_cs_packet_next_reloc(p, &reloc); 1370 r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -1414,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
1414{ 1464{
1415 struct radeon_cs_packet pkt; 1465 struct radeon_cs_packet pkt;
1416 struct evergreen_cs_track *track; 1466 struct evergreen_cs_track *track;
1467 u32 tmp;
1417 int r; 1468 int r;
1418 1469
1419 if (p->track == NULL) { 1470 if (p->track == NULL) {
@@ -1422,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
1422 if (track == NULL) 1473 if (track == NULL)
1423 return -ENOMEM; 1474 return -ENOMEM;
1424 evergreen_cs_track_init(track); 1475 evergreen_cs_track_init(track);
1425 track->npipes = p->rdev->config.evergreen.tiling_npipes; 1476 if (p->rdev->family >= CHIP_CAYMAN)
1426 track->nbanks = p->rdev->config.evergreen.tiling_nbanks; 1477 tmp = p->rdev->config.cayman.tile_config;
1427 track->group_size = p->rdev->config.evergreen.tiling_group_size; 1478 else
1479 tmp = p->rdev->config.evergreen.tile_config;
1480
1481 switch (tmp & 0xf) {
1482 case 0:
1483 track->npipes = 1;
1484 break;
1485 case 1:
1486 default:
1487 track->npipes = 2;
1488 break;
1489 case 2:
1490 track->npipes = 4;
1491 break;
1492 case 3:
1493 track->npipes = 8;
1494 break;
1495 }
1496
1497 switch ((tmp & 0xf0) >> 4) {
1498 case 0:
1499 track->nbanks = 4;
1500 break;
1501 case 1:
1502 default:
1503 track->nbanks = 8;
1504 break;
1505 case 2:
1506 track->nbanks = 16;
1507 break;
1508 }
1509
1510 switch ((tmp & 0xf00) >> 8) {
1511 case 0:
1512 track->group_size = 256;
1513 break;
1514 case 1:
1515 default:
1516 track->group_size = 512;
1517 break;
1518 }
1519
1520 switch ((tmp & 0xf000) >> 12) {
1521 case 0:
1522 track->row_size = 1;
1523 break;
1524 case 1:
1525 default:
1526 track->row_size = 2;
1527 break;
1528 case 2:
1529 track->row_size = 4;
1530 break;
1531 }
1532
1428 p->track = track; 1533 p->track = track;
1429 } 1534 }
1430 do { 1535 do {
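The evergreen_cs_parse() hunk above stops relying on precomputed tiling values and instead unpacks pipe count, bank count, group size and row size from the tile_config word. A standalone sketch of that decode, using the same nibble positions and defaults as the hunk:

#include <stdint.h>
#include <stdio.h>

struct tile_info { int npipes, nbanks, group_size, row_size; };

static struct tile_info decode_tile_config(uint32_t tmp)
{
        struct tile_info t;

        switch (tmp & 0xf) {                  /* pipe count */
        case 0:  t.npipes = 1; break;
        case 2:  t.npipes = 4; break;
        case 3:  t.npipes = 8; break;
        case 1:
        default: t.npipes = 2; break;
        }

        switch ((tmp & 0xf0) >> 4) {          /* bank count */
        case 0:  t.nbanks = 4;  break;
        case 2:  t.nbanks = 16; break;
        case 1:
        default: t.nbanks = 8;  break;
        }

        t.group_size = ((tmp & 0xf00) >> 8) == 0 ? 256 : 512;

        switch ((tmp & 0xf000) >> 12) {       /* row size, in KB */
        case 0:  t.row_size = 1; break;
        case 2:  t.row_size = 4; break;
        case 1:
        default: t.row_size = 2; break;
        }
        return t;
}

int main(void)
{
        struct tile_info t = decode_tile_config(0x2112);  /* example config */
        printf("pipes=%d banks=%d group=%d row=%dKB\n",
               t.npipes, t.nbanks, t.group_size, t.row_size);
        return 0;
}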
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index c781c92c3451..7d7f2155e34c 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -42,6 +42,17 @@
42# define EVERGREEN_GRPH_DEPTH_8BPP 0 42# define EVERGREEN_GRPH_DEPTH_8BPP 0
43# define EVERGREEN_GRPH_DEPTH_16BPP 1 43# define EVERGREEN_GRPH_DEPTH_16BPP 1
44# define EVERGREEN_GRPH_DEPTH_32BPP 2 44# define EVERGREEN_GRPH_DEPTH_32BPP 2
45# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
46# define EVERGREEN_ADDR_SURF_2_BANK 0
47# define EVERGREEN_ADDR_SURF_4_BANK 1
48# define EVERGREEN_ADDR_SURF_8_BANK 2
49# define EVERGREEN_ADDR_SURF_16_BANK 3
50# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
51# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
52# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
53# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
54# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
55# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
45# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8) 56# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
46/* 8 BPP */ 57/* 8 BPP */
47# define EVERGREEN_GRPH_FORMAT_INDEXED 0 58# define EVERGREEN_GRPH_FORMAT_INDEXED 0
@@ -61,6 +72,24 @@
61# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5 72# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
62# define EVERGREEN_GRPH_FORMAT_RGB111110 6 73# define EVERGREEN_GRPH_FORMAT_RGB111110 6
63# define EVERGREEN_GRPH_FORMAT_BGR101111 7 74# define EVERGREEN_GRPH_FORMAT_BGR101111 7
75# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
76# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
77# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
78# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
79# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
80# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
81# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
82# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
83# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
84# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
85# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
86# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
87# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
88# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
89# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
90# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
91# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
92# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
64# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20) 93# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
65# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0 94# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
66# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1 95# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b937c49054d9..e00039e59a75 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -899,6 +899,10 @@
899#define DB_HTILE_DATA_BASE 0x28014 899#define DB_HTILE_DATA_BASE 0x28014
900#define DB_Z_INFO 0x28040 900#define DB_Z_INFO 0x28040
901# define Z_ARRAY_MODE(x) ((x) << 4) 901# define Z_ARRAY_MODE(x) ((x) << 4)
902# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
903# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
904# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
905# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
902#define DB_STENCIL_INFO 0x28044 906#define DB_STENCIL_INFO 0x28044
903#define DB_Z_READ_BASE 0x28048 907#define DB_Z_READ_BASE 0x28048
904#define DB_STENCIL_READ_BASE 0x2804c 908#define DB_STENCIL_READ_BASE 0x2804c
@@ -951,6 +955,29 @@
951# define CB_SF_EXPORT_FULL 0 955# define CB_SF_EXPORT_FULL 0
952# define CB_SF_EXPORT_NORM 1 956# define CB_SF_EXPORT_NORM 1
953#define CB_COLOR0_ATTRIB 0x28c74 957#define CB_COLOR0_ATTRIB 0x28c74
958# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
959# define ADDR_SURF_TILE_SPLIT_64B 0
960# define ADDR_SURF_TILE_SPLIT_128B 1
961# define ADDR_SURF_TILE_SPLIT_256B 2
962# define ADDR_SURF_TILE_SPLIT_512B 3
963# define ADDR_SURF_TILE_SPLIT_1KB 4
964# define ADDR_SURF_TILE_SPLIT_2KB 5
965# define ADDR_SURF_TILE_SPLIT_4KB 6
966# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
967# define ADDR_SURF_2_BANK 0
968# define ADDR_SURF_4_BANK 1
969# define ADDR_SURF_8_BANK 2
970# define ADDR_SURF_16_BANK 3
971# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
972# define ADDR_SURF_BANK_WIDTH_1 0
973# define ADDR_SURF_BANK_WIDTH_2 1
974# define ADDR_SURF_BANK_WIDTH_4 2
975# define ADDR_SURF_BANK_WIDTH_8 3
976# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
977# define ADDR_SURF_BANK_HEIGHT_1 0
978# define ADDR_SURF_BANK_HEIGHT_2 1
979# define ADDR_SURF_BANK_HEIGHT_4 2
980# define ADDR_SURF_BANK_HEIGHT_8 3
954#define CB_COLOR0_DIM 0x28c78 981#define CB_COLOR0_DIM 0x28c78
955/* only CB0-7 blocks have these regs */ 982/* only CB0-7 blocks have these regs */
956#define CB_COLOR0_CMASK 0x28c7c 983#define CB_COLOR0_CMASK 0x28c7c
@@ -1137,7 +1164,11 @@
1137# define SQ_SEL_1 5 1164# define SQ_SEL_1 5
1138#define SQ_TEX_RESOURCE_WORD5_0 0x30014 1165#define SQ_TEX_RESOURCE_WORD5_0 0x30014
1139#define SQ_TEX_RESOURCE_WORD6_0 0x30018 1166#define SQ_TEX_RESOURCE_WORD6_0 0x30018
1167# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
1140#define SQ_TEX_RESOURCE_WORD7_0 0x3001c 1168#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
1169# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
1170# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
1171# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
1141 1172
1142#define SQ_VTX_CONSTANT_WORD0_0 0x30000 1173#define SQ_VTX_CONSTANT_WORD0_0 0x30000
1143#define SQ_VTX_CONSTANT_WORD1_0 0x30004 1174#define SQ_VTX_CONSTANT_WORD1_0 0x30004
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ad158ea49901..bfc08f6320f8 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
187{ 187{
188 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 188 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
189 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; 189 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
190 int i;
190 191
191 /* Lock the graphics update lock */ 192 /* Lock the graphics update lock */
192 /* update the scanout addresses */ 193 /* update the scanout addresses */
193 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); 194 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
194 195
195 /* Wait for update_pending to go high. */ 196 /* Wait for update_pending to go high. */
196 while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)); 197 for (i = 0; i < rdev->usec_timeout; i++) {
198 if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
199 break;
200 udelay(1);
201 }
197 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 202 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
198 203
199 /* Unlock the lock, so double-buffering can take place inside vblank */ 204 /* Unlock the lock, so double-buffering can take place inside vblank */
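The r100_page_flip() hunk above replaces an unbounded busy-wait on GUI_TRIG_OFFSET with a poll that gives up after rdev->usec_timeout iterations of udelay(1); the same conversion is applied to rs600 and rv770 further below. A minimal sketch of the pattern as a hypothetical standalone helper (not part of the patch):

static bool example_wait_update_pending(struct radeon_device *rdev,
                                        struct radeon_crtc *radeon_crtc)
{
        int i;

        /* Poll for at most rdev->usec_timeout iterations, one microsecond
         * apart, instead of spinning forever if the bit never rises. */
        for (i = 0; i < rdev->usec_timeout; i++) {
                if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
                    RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
                        return true;
                udelay(1);
        }
        return false;
}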
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 400b26df652a..c93bc64707e1 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -701,16 +701,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
701 return r; 701 return r;
702 } 702 }
703 703
704 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 704 if (p->keep_tiling_flags) {
705 tile_flags |= R300_TXO_MACRO_TILE; 705 ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
706 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 706 ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
707 tile_flags |= R300_TXO_MICRO_TILE; 707 } else {
708 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 708 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
709 tile_flags |= R300_TXO_MICRO_TILE_SQUARE; 709 tile_flags |= R300_TXO_MACRO_TILE;
710 710 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
711 tmp = idx_value + ((u32)reloc->lobj.gpu_offset); 711 tile_flags |= R300_TXO_MICRO_TILE;
712 tmp |= tile_flags; 712 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
713 ib[idx] = tmp; 713 tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
714
715 tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
716 tmp |= tile_flags;
717 ib[idx] = tmp;
718 }
714 track->textures[i].robj = reloc->robj; 719 track->textures[i].robj = reloc->robj;
715 track->tex_dirty = true; 720 track->tex_dirty = true;
716 break; 721 break;
@@ -760,24 +765,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
760 /* RB3D_COLORPITCH1 */ 765 /* RB3D_COLORPITCH1 */
761 /* RB3D_COLORPITCH2 */ 766 /* RB3D_COLORPITCH2 */
762 /* RB3D_COLORPITCH3 */ 767 /* RB3D_COLORPITCH3 */
763 r = r100_cs_packet_next_reloc(p, &reloc); 768 if (!p->keep_tiling_flags) {
764 if (r) { 769 r = r100_cs_packet_next_reloc(p, &reloc);
765 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 770 if (r) {
766 idx, reg); 771 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
767 r100_cs_dump_packet(p, pkt); 772 idx, reg);
768 return r; 773 r100_cs_dump_packet(p, pkt);
769 } 774 return r;
775 }
770 776
771 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 777 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
772 tile_flags |= R300_COLOR_TILE_ENABLE; 778 tile_flags |= R300_COLOR_TILE_ENABLE;
773 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 779 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
774 tile_flags |= R300_COLOR_MICROTILE_ENABLE; 780 tile_flags |= R300_COLOR_MICROTILE_ENABLE;
775 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 781 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
776 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; 782 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
777 783
778 tmp = idx_value & ~(0x7 << 16); 784 tmp = idx_value & ~(0x7 << 16);
779 tmp |= tile_flags; 785 tmp |= tile_flags;
780 ib[idx] = tmp; 786 ib[idx] = tmp;
787 }
781 i = (reg - 0x4E38) >> 2; 788 i = (reg - 0x4E38) >> 2;
782 track->cb[i].pitch = idx_value & 0x3FFE; 789 track->cb[i].pitch = idx_value & 0x3FFE;
783 switch (((idx_value >> 21) & 0xF)) { 790 switch (((idx_value >> 21) & 0xF)) {
@@ -843,25 +850,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
843 break; 850 break;
844 case 0x4F24: 851 case 0x4F24:
845 /* ZB_DEPTHPITCH */ 852 /* ZB_DEPTHPITCH */
846 r = r100_cs_packet_next_reloc(p, &reloc); 853 if (!p->keep_tiling_flags) {
847 if (r) { 854 r = r100_cs_packet_next_reloc(p, &reloc);
848 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 855 if (r) {
849 idx, reg); 856 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
850 r100_cs_dump_packet(p, pkt); 857 idx, reg);
851 return r; 858 r100_cs_dump_packet(p, pkt);
852 } 859 return r;
853 860 }
854 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
855 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
856 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
857 tile_flags |= R300_DEPTHMICROTILE_TILED;
858 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
859 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
860 861
861 tmp = idx_value & ~(0x7 << 16); 862 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
862 tmp |= tile_flags; 863 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
863 ib[idx] = tmp; 864 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
865 tile_flags |= R300_DEPTHMICROTILE_TILED;
866 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
867 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
864 868
869 tmp = idx_value & ~(0x7 << 16);
870 tmp |= tile_flags;
871 ib[idx] = tmp;
872 }
865 track->zb.pitch = idx_value & 0x3FFC; 873 track->zb.pitch = idx_value & 0x3FFC;
866 track->zb_dirty = true; 874 track->zb_dirty = true;
867 break; 875 break;
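The three r300 hunks above share one shape: if the new keep_tiling_flags parser bit is set, the CS checker leaves the tiling bits userspace encoded in the packet alone; otherwise it rebuilds them from the relocated BO's flags as before. Read as plain code, the patched TX_OFFSET branch (the first hunk) looks roughly like this readability sketch, not a verbatim copy:

if (p->keep_tiling_flags) {
        /* Keep userspace's tiling bits: preserve the low 5 bits of the
         * packet value and only add the BO's GPU offset. */
        ib[idx] = (idx_value & 31) |
                  ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
} else {
        /* Rebuild the tiling bits from the BO's tiling flags. */
        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                tile_flags |= R300_TXO_MACRO_TILE;
        if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                tile_flags |= R300_TXO_MICRO_TILE;
        else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
                tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

        ib[idx] = (idx_value + (u32)reloc->lobj.gpu_offset) | tile_flags;
}

The RB3D_COLORPITCH and ZB_DEPTHPITCH cases differ only in that, with keep_tiling_flags set, they skip the relocation lookup entirely and leave ib[idx] untouched.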
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 19afc43ad173..9cdda0b3b081 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -288,24 +288,6 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
288 pcie_lanes); 288 pcie_lanes);
289} 289}
290 290
291static int r600_pm_get_type_index(struct radeon_device *rdev,
292 enum radeon_pm_state_type ps_type,
293 int instance)
294{
295 int i;
296 int found_instance = -1;
297
298 for (i = 0; i < rdev->pm.num_power_states; i++) {
299 if (rdev->pm.power_state[i].type == ps_type) {
300 found_instance++;
301 if (found_instance == instance)
302 return i;
303 }
304 }
305 /* return default if no match */
306 return rdev->pm.default_power_state_index;
307}
308
309void rs780_pm_init_profile(struct radeon_device *rdev) 291void rs780_pm_init_profile(struct radeon_device *rdev)
310{ 292{
311 if (rdev->pm.num_power_states == 2) { 293 if (rdev->pm.num_power_states == 2) {
@@ -421,6 +403,8 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
421 403
422void r600_pm_init_profile(struct radeon_device *rdev) 404void r600_pm_init_profile(struct radeon_device *rdev)
423{ 405{
406 int idx;
407
424 if (rdev->family == CHIP_R600) { 408 if (rdev->family == CHIP_R600) {
425 /* XXX */ 409 /* XXX */
426 /* default */ 410 /* default */
@@ -502,81 +486,43 @@ void r600_pm_init_profile(struct radeon_device *rdev)
502 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 486 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
503 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; 487 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
504 /* low sh */ 488 /* low sh */
505 if (rdev->flags & RADEON_IS_MOBILITY) { 489 if (rdev->flags & RADEON_IS_MOBILITY)
506 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 490 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
507 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 491 else
508 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 492 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
509 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 493 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
510 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 494 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
511 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 495 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
512 } else { 496 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
513 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
514 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
515 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
516 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
517 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
518 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
519 }
520 /* mid sh */ 497 /* mid sh */
521 if (rdev->flags & RADEON_IS_MOBILITY) { 498 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
522 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 499 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
523 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 500 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
524 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 501 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
525 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
526 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
527 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
528 } else {
529 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
530 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
531 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
532 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
533 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
534 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
535 }
536 /* high sh */ 502 /* high sh */
537 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 503 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
538 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 504 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
539 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 505 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
540 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
541 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 506 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
542 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; 507 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
543 /* low mh */ 508 /* low mh */
544 if (rdev->flags & RADEON_IS_MOBILITY) { 509 if (rdev->flags & RADEON_IS_MOBILITY)
545 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 510 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
546 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 511 else
547 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 512 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
548 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 513 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
549 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 514 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
550 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 515 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
551 } else { 516 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
552 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
553 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
554 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
555 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
556 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
557 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
558 }
559 /* mid mh */ 517 /* mid mh */
560 if (rdev->flags & RADEON_IS_MOBILITY) { 518 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
561 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 519 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
562 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 520 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
563 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 521 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
564 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
565 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
566 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
567 } else {
568 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
569 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
570 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
571 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
572 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
573 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
574 }
575 /* high mh */ 522 /* high mh */
576 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 523 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
577 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); 524 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
578 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 525 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
579 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
580 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 526 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
581 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; 527 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
582 } 528 }
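Condensed from the hunk above: the duplicated mobility/desktop branches collapse into a single power-state index lookup (now via the shared radeon_pm_get_type_index(), declared in radeon.h and defined in radeon_pm.c further below) that is reused for both the dpms-off and dpms-on slots of each profile. A hedged sketch of the low-sh case only, in hypothetical standalone form:

int idx;

/* One lookup replaces the duplicated mobility/desktop assignments. */
if (rdev->flags & RADEON_IS_MOBILITY)
        idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
else
        idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;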
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0a2e023c1557..cb1acffd2430 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -941,7 +941,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
941 track->db_depth_control = radeon_get_ib_value(p, idx); 941 track->db_depth_control = radeon_get_ib_value(p, idx);
942 break; 942 break;
943 case R_028010_DB_DEPTH_INFO: 943 case R_028010_DB_DEPTH_INFO:
944 if (r600_cs_packet_next_is_pkt3_nop(p)) { 944 if (!p->keep_tiling_flags &&
945 r600_cs_packet_next_is_pkt3_nop(p)) {
945 r = r600_cs_packet_next_reloc(p, &reloc); 946 r = r600_cs_packet_next_reloc(p, &reloc);
946 if (r) { 947 if (r) {
947 dev_warn(p->dev, "bad SET_CONTEXT_REG " 948 dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -992,7 +993,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
992 case R_0280B4_CB_COLOR5_INFO: 993 case R_0280B4_CB_COLOR5_INFO:
993 case R_0280B8_CB_COLOR6_INFO: 994 case R_0280B8_CB_COLOR6_INFO:
994 case R_0280BC_CB_COLOR7_INFO: 995 case R_0280BC_CB_COLOR7_INFO:
995 if (r600_cs_packet_next_is_pkt3_nop(p)) { 996 if (!p->keep_tiling_flags &&
997 r600_cs_packet_next_is_pkt3_nop(p)) {
996 r = r600_cs_packet_next_reloc(p, &reloc); 998 r = r600_cs_packet_next_reloc(p, &reloc);
997 if (r) { 999 if (r) {
998 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1000 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
@@ -1291,10 +1293,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1291 mip_offset <<= 8; 1293 mip_offset <<= 8;
1292 1294
1293 word0 = radeon_get_ib_value(p, idx + 0); 1295 word0 = radeon_get_ib_value(p, idx + 0);
1294 if (tiling_flags & RADEON_TILING_MACRO) 1296 if (!p->keep_tiling_flags) {
1295 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1297 if (tiling_flags & RADEON_TILING_MACRO)
1296 else if (tiling_flags & RADEON_TILING_MICRO) 1298 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1297 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1299 else if (tiling_flags & RADEON_TILING_MICRO)
1300 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1301 }
1298 word1 = radeon_get_ib_value(p, idx + 1); 1302 word1 = radeon_get_ib_value(p, idx + 1);
1299 w0 = G_038000_TEX_WIDTH(word0) + 1; 1303 w0 = G_038000_TEX_WIDTH(word0) + 1;
1300 h0 = G_038004_TEX_HEIGHT(word1) + 1; 1304 h0 = G_038004_TEX_HEIGHT(word1) + 1;
@@ -1621,10 +1625,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1621 return -EINVAL; 1625 return -EINVAL;
1622 } 1626 }
1623 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1627 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1624 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1628 if (!p->keep_tiling_flags) {
1625 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1629 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1626 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1630 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1627 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1631 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1632 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1633 }
1628 texture = reloc->robj; 1634 texture = reloc->robj;
1629 /* tex mip base */ 1635 /* tex mip base */
1630 r = r600_cs_packet_next_reloc(p, &reloc); 1636 r = r600_cs_packet_next_reloc(p, &reloc);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 83b2e016a4a1..c8f4dbd2d17c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -610,7 +610,8 @@ struct radeon_cs_parser {
610 struct radeon_ib *ib; 610 struct radeon_ib *ib;
611 void *track; 611 void *track;
612 unsigned family; 612 unsigned family;
613 int parser_error; 613 int parser_error;
614 bool keep_tiling_flags;
614}; 615};
615 616
616extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); 617extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
@@ -783,8 +784,7 @@ struct radeon_pm_clock_info {
783 784
784struct radeon_power_state { 785struct radeon_power_state {
785 enum radeon_pm_state_type type; 786 enum radeon_pm_state_type type;
786 /* XXX: use a define for num clock modes */ 787 struct radeon_pm_clock_info *clock_info;
787 struct radeon_pm_clock_info clock_info[8];
788 /* number of valid clock modes in this power state */ 788 /* number of valid clock modes in this power state */
789 int num_clock_modes; 789 int num_clock_modes;
790 struct radeon_pm_clock_info *default_clock_mode; 790 struct radeon_pm_clock_info *default_clock_mode;
@@ -854,6 +854,9 @@ struct radeon_pm {
854 struct device *int_hwmon_dev; 854 struct device *int_hwmon_dev;
855}; 855};
856 856
857int radeon_pm_get_type_index(struct radeon_device *rdev,
858 enum radeon_pm_state_type ps_type,
859 int instance);
857 860
858/* 861/*
859 * Benchmarking 862 * Benchmarking
@@ -1141,6 +1144,48 @@ struct r600_vram_scratch {
1141 u64 gpu_addr; 1144 u64 gpu_addr;
1142}; 1145};
1143 1146
1147
1148/*
1149 * Mutex which allows recursive locking from the same process.
1150 */
1151struct radeon_mutex {
1152 struct mutex mutex;
1153 struct task_struct *owner;
1154 int level;
1155};
1156
1157static inline void radeon_mutex_init(struct radeon_mutex *mutex)
1158{
1159 mutex_init(&mutex->mutex);
1160 mutex->owner = NULL;
1161 mutex->level = 0;
1162}
1163
1164static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
1165{
1166 if (mutex_trylock(&mutex->mutex)) {
1167 /* The mutex was unlocked before, so it's ours now */
1168 mutex->owner = current;
1169 } else if (mutex->owner != current) {
1170 /* Another process locked the mutex, take it */
1171 mutex_lock(&mutex->mutex);
1172 mutex->owner = current;
1173 }
1174 /* Otherwise the mutex was already locked by this process */
1175
1176 mutex->level++;
1177}
1178
1179static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
1180{
1181 if (--mutex->level > 0)
1182 return;
1183
1184 mutex->owner = NULL;
1185 mutex_unlock(&mutex->mutex);
1186}
1187
1188
1144/* 1189/*
1145 * Core structure, functions and helpers. 1190 * Core structure, functions and helpers.
1146 */ 1191 */
@@ -1196,7 +1241,7 @@ struct radeon_device {
1196 struct radeon_gem gem; 1241 struct radeon_gem gem;
1197 struct radeon_pm pm; 1242 struct radeon_pm pm;
1198 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; 1243 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
1199 struct mutex cs_mutex; 1244 struct radeon_mutex cs_mutex;
1200 struct radeon_wb wb; 1245 struct radeon_wb wb;
1201 struct radeon_dummy_page dummy_page; 1246 struct radeon_dummy_page dummy_page;
1202 bool gpu_lockup; 1247 bool gpu_lockup;
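The radeon_mutex added above lets the same task take cs_mutex more than once: the underlying mutex is only locked when it is free or owned by another task, and a level counter tracks the nesting depth. This matters if radeon_gpu_reset() (which now also takes cs_mutex, see the radeon_device.c hunk below) is reached from a path that already holds the lock. A minimal usage sketch, as a hypothetical call sequence rather than patch code:

/* Same task locking twice: the second call only bumps mutex->level. */
radeon_mutex_lock(&rdev->cs_mutex);     /* owner = current, level = 1 */
radeon_mutex_lock(&rdev->cs_mutex);     /* same owner, level = 2, no deadlock */
radeon_mutex_unlock(&rdev->cs_mutex);   /* level = 1, still held */
radeon_mutex_unlock(&rdev->cs_mutex);   /* level = 0, underlying mutex released */

A different task calling radeon_mutex_lock() in between blocks in mutex_lock() until the owner's level drops back to zero, exactly as with a plain mutex.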
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 3f6636bb2d7f..3516a6081dcf 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
35 35
36 /* Fail only if calling the method fails and ATIF is supported */ 36 /* Fail only if calling the method fails and ATIF is supported */
37 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 37 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
38 printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status)); 38 DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
39 acpi_format_exception(status));
39 kfree(buffer.pointer); 40 kfree(buffer.pointer);
40 return 1; 41 return 1;
41 } 42 }
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
50 acpi_handle handle; 51 acpi_handle handle;
51 int ret; 52 int ret;
52 53
53 /* No need to proceed if we're sure that ATIF is not supported */
54 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
55 return 0;
56
57 /* Get the device handle */ 54 /* Get the device handle */
58 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); 55 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
59 56
57 /* No need to proceed if we're sure that ATIF is not supported */
58 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
59 return 0;
60
60 /* Call the ATIF method */ 61 /* Call the ATIF method */
61 ret = radeon_atif_call(handle); 62 ret = radeon_atif_call(handle);
62 if (ret) 63 if (ret)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e2944566ffea..a2e1eae114ef 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -834,7 +834,7 @@ static struct radeon_asic sumo_asic = {
834 .pm_misc = &evergreen_pm_misc, 834 .pm_misc = &evergreen_pm_misc,
835 .pm_prepare = &evergreen_pm_prepare, 835 .pm_prepare = &evergreen_pm_prepare,
836 .pm_finish = &evergreen_pm_finish, 836 .pm_finish = &evergreen_pm_finish,
837 .pm_init_profile = &rs780_pm_init_profile, 837 .pm_init_profile = &sumo_pm_init_profile,
838 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 838 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
839 .pre_page_flip = &evergreen_pre_page_flip, 839 .pre_page_flip = &evergreen_pre_page_flip,
840 .page_flip = &evergreen_page_flip, 840 .page_flip = &evergreen_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 85f14f0337e4..59914842a729 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -413,6 +413,7 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
413extern void evergreen_pm_misc(struct radeon_device *rdev); 413extern void evergreen_pm_misc(struct radeon_device *rdev);
414extern void evergreen_pm_prepare(struct radeon_device *rdev); 414extern void evergreen_pm_prepare(struct radeon_device *rdev);
415extern void evergreen_pm_finish(struct radeon_device *rdev); 415extern void evergreen_pm_finish(struct radeon_device *rdev);
416extern void sumo_pm_init_profile(struct radeon_device *rdev);
416extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); 417extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
417extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 418extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
418extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); 419extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 08d0b94332e6..d24baf30efcb 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -62,6 +62,87 @@ union atom_supported_devices {
62 struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; 62 struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
63}; 63};
64 64
65static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
66 ATOM_GPIO_I2C_ASSIGMENT *gpio,
67 u8 index)
68{
69 /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
70 if ((rdev->family == CHIP_R420) ||
71 (rdev->family == CHIP_R423) ||
72 (rdev->family == CHIP_RV410)) {
73 if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
74 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
75 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
76 gpio->ucClkMaskShift = 0x19;
77 gpio->ucDataMaskShift = 0x18;
78 }
79 }
80
81 /* some evergreen boards have bad data for this entry */
82 if (ASIC_IS_DCE4(rdev)) {
83 if ((index == 7) &&
84 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
85 (gpio->sucI2cId.ucAccess == 0)) {
86 gpio->sucI2cId.ucAccess = 0x97;
87 gpio->ucDataMaskShift = 8;
88 gpio->ucDataEnShift = 8;
89 gpio->ucDataY_Shift = 8;
90 gpio->ucDataA_Shift = 8;
91 }
92 }
93
94 /* some DCE3 boards have bad data for this entry */
95 if (ASIC_IS_DCE3(rdev)) {
96 if ((index == 4) &&
97 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
98 (gpio->sucI2cId.ucAccess == 0x94))
99 gpio->sucI2cId.ucAccess = 0x14;
100 }
101}
102
103static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
104{
105 struct radeon_i2c_bus_rec i2c;
106
107 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
108
109 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
110 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
111 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
112 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
113 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
114 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
115 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
116 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
117 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
118 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
119 i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
120 i2c.en_data_mask = (1 << gpio->ucDataEnShift);
121 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
122 i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
123 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
124 i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
125
126 if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
127 i2c.hw_capable = true;
128 else
129 i2c.hw_capable = false;
130
131 if (gpio->sucI2cId.ucAccess == 0xa0)
132 i2c.mm_i2c = true;
133 else
134 i2c.mm_i2c = false;
135
136 i2c.i2c_id = gpio->sucI2cId.ucAccess;
137
138 if (i2c.mask_clk_reg)
139 i2c.valid = true;
140 else
141 i2c.valid = false;
142
143 return i2c;
144}
145
65static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, 146static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
66 uint8_t id) 147 uint8_t id)
67{ 148{
@@ -85,59 +166,10 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
85 for (i = 0; i < num_indices; i++) { 166 for (i = 0; i < num_indices; i++) {
86 gpio = &i2c_info->asGPIO_Info[i]; 167 gpio = &i2c_info->asGPIO_Info[i];
87 168
88 /* some evergreen boards have bad data for this entry */ 169 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
89 if (ASIC_IS_DCE4(rdev)) {
90 if ((i == 7) &&
91 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
92 (gpio->sucI2cId.ucAccess == 0)) {
93 gpio->sucI2cId.ucAccess = 0x97;
94 gpio->ucDataMaskShift = 8;
95 gpio->ucDataEnShift = 8;
96 gpio->ucDataY_Shift = 8;
97 gpio->ucDataA_Shift = 8;
98 }
99 }
100
101 /* some DCE3 boards have bad data for this entry */
102 if (ASIC_IS_DCE3(rdev)) {
103 if ((i == 4) &&
104 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
105 (gpio->sucI2cId.ucAccess == 0x94))
106 gpio->sucI2cId.ucAccess = 0x14;
107 }
108 170
109 if (gpio->sucI2cId.ucAccess == id) { 171 if (gpio->sucI2cId.ucAccess == id) {
110 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 172 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
111 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
112 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
113 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
114 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
115 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
116 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
117 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
118 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
119 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
120 i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
121 i2c.en_data_mask = (1 << gpio->ucDataEnShift);
122 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
123 i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
124 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
125 i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
126
127 if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
128 i2c.hw_capable = true;
129 else
130 i2c.hw_capable = false;
131
132 if (gpio->sucI2cId.ucAccess == 0xa0)
133 i2c.mm_i2c = true;
134 else
135 i2c.mm_i2c = false;
136
137 i2c.i2c_id = gpio->sucI2cId.ucAccess;
138
139 if (i2c.mask_clk_reg)
140 i2c.valid = true;
141 break; 173 break;
142 } 174 }
143 } 175 }
@@ -157,8 +189,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
157 int i, num_indices; 189 int i, num_indices;
158 char stmp[32]; 190 char stmp[32];
159 191
160 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
161
162 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { 192 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
163 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); 193 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
164 194
@@ -167,60 +197,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
167 197
168 for (i = 0; i < num_indices; i++) { 198 for (i = 0; i < num_indices; i++) {
169 gpio = &i2c_info->asGPIO_Info[i]; 199 gpio = &i2c_info->asGPIO_Info[i];
170 i2c.valid = false;
171
172 /* some evergreen boards have bad data for this entry */
173 if (ASIC_IS_DCE4(rdev)) {
174 if ((i == 7) &&
175 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
176 (gpio->sucI2cId.ucAccess == 0)) {
177 gpio->sucI2cId.ucAccess = 0x97;
178 gpio->ucDataMaskShift = 8;
179 gpio->ucDataEnShift = 8;
180 gpio->ucDataY_Shift = 8;
181 gpio->ucDataA_Shift = 8;
182 }
183 }
184 200
185 /* some DCE3 boards have bad data for this entry */ 201 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
186 if (ASIC_IS_DCE3(rdev)) {
187 if ((i == 4) &&
188 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
189 (gpio->sucI2cId.ucAccess == 0x94))
190 gpio->sucI2cId.ucAccess = 0x14;
191 }
192 202
193 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 203 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
194 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
195 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
196 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
197 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
198 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
199 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
200 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
201 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
202 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
203 i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
204 i2c.en_data_mask = (1 << gpio->ucDataEnShift);
205 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
206 i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
207 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
208 i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
209
210 if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
211 i2c.hw_capable = true;
212 else
213 i2c.hw_capable = false;
214
215 if (gpio->sucI2cId.ucAccess == 0xa0)
216 i2c.mm_i2c = true;
217 else
218 i2c.mm_i2c = false;
219 204
220 i2c.i2c_id = gpio->sucI2cId.ucAccess; 205 if (i2c.valid) {
221
222 if (i2c.mask_clk_reg) {
223 i2c.valid = true;
224 sprintf(stmp, "0x%x", i2c.i2c_id); 206 sprintf(stmp, "0x%x", i2c.i2c_id);
225 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); 207 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
226 } 208 }
@@ -1996,10 +1978,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
1996 return state_index; 1978 return state_index;
1997 /* last mode is usually default, array is low to high */ 1979 /* last mode is usually default, array is low to high */
1998 for (i = 0; i < num_modes; i++) { 1980 for (i = 0; i < num_modes; i++) {
1981 rdev->pm.power_state[state_index].clock_info =
1982 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
1983 if (!rdev->pm.power_state[state_index].clock_info)
1984 return state_index;
1985 rdev->pm.power_state[state_index].num_clock_modes = 1;
1999 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 1986 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2000 switch (frev) { 1987 switch (frev) {
2001 case 1: 1988 case 1:
2002 rdev->pm.power_state[state_index].num_clock_modes = 1;
2003 rdev->pm.power_state[state_index].clock_info[0].mclk = 1989 rdev->pm.power_state[state_index].clock_info[0].mclk =
2004 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); 1990 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
2005 rdev->pm.power_state[state_index].clock_info[0].sclk = 1991 rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2035,7 +2021,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2035 state_index++; 2021 state_index++;
2036 break; 2022 break;
2037 case 2: 2023 case 2:
2038 rdev->pm.power_state[state_index].num_clock_modes = 1;
2039 rdev->pm.power_state[state_index].clock_info[0].mclk = 2024 rdev->pm.power_state[state_index].clock_info[0].mclk =
2040 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); 2025 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
2041 rdev->pm.power_state[state_index].clock_info[0].sclk = 2026 rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2072,7 +2057,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2072 state_index++; 2057 state_index++;
2073 break; 2058 break;
2074 case 3: 2059 case 3:
2075 rdev->pm.power_state[state_index].num_clock_modes = 1;
2076 rdev->pm.power_state[state_index].clock_info[0].mclk = 2060 rdev->pm.power_state[state_index].clock_info[0].mclk =
2077 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); 2061 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
2078 rdev->pm.power_state[state_index].clock_info[0].sclk = 2062 rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2257,7 +2241,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
2257 rdev->pm.default_power_state_index = state_index; 2241 rdev->pm.default_power_state_index = state_index;
2258 rdev->pm.power_state[state_index].default_clock_mode = 2242 rdev->pm.power_state[state_index].default_clock_mode =
2259 &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; 2243 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
2260 if (ASIC_IS_DCE5(rdev)) { 2244 if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
2261 /* NI chips post without MC ucode, so default clocks are strobe mode only */ 2245 /* NI chips post without MC ucode, so default clocks are strobe mode only */
2262 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; 2246 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
2263 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; 2247 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2377,17 +2361,31 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
2377 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + 2361 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
2378 (power_state->v1.ucNonClockStateIndex * 2362 (power_state->v1.ucNonClockStateIndex *
2379 power_info->pplib.ucNonClockSize)); 2363 power_info->pplib.ucNonClockSize));
2380 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { 2364 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
2381 clock_info = (union pplib_clock_info *) 2365 ((power_info->pplib.ucStateEntrySize - 1) ?
2382 (mode_info->atom_context->bios + data_offset + 2366 (power_info->pplib.ucStateEntrySize - 1) : 1),
2383 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + 2367 GFP_KERNEL);
2384 (power_state->v1.ucClockStateIndices[j] * 2368 if (!rdev->pm.power_state[i].clock_info)
2385 power_info->pplib.ucClockInfoSize)); 2369 return state_index;
2386 valid = radeon_atombios_parse_pplib_clock_info(rdev, 2370 if (power_info->pplib.ucStateEntrySize - 1) {
2387 state_index, mode_index, 2371 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
2388 clock_info); 2372 clock_info = (union pplib_clock_info *)
2389 if (valid) 2373 (mode_info->atom_context->bios + data_offset +
2390 mode_index++; 2374 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
2375 (power_state->v1.ucClockStateIndices[j] *
2376 power_info->pplib.ucClockInfoSize));
2377 valid = radeon_atombios_parse_pplib_clock_info(rdev,
2378 state_index, mode_index,
2379 clock_info);
2380 if (valid)
2381 mode_index++;
2382 }
2383 } else {
2384 rdev->pm.power_state[state_index].clock_info[0].mclk =
2385 rdev->clock.default_mclk;
2386 rdev->pm.power_state[state_index].clock_info[0].sclk =
2387 rdev->clock.default_sclk;
2388 mode_index++;
2391 } 2389 }
2392 rdev->pm.power_state[state_index].num_clock_modes = mode_index; 2390 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
2393 if (mode_index) { 2391 if (mode_index) {
@@ -2456,18 +2454,32 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2456 non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ 2454 non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
2457 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2455 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2458 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2456 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2459 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2457 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
2460 clock_array_index = power_state->v2.clockInfoIndex[j]; 2458 (power_state->v2.ucNumDPMLevels ?
2461 /* XXX this might be an inagua bug... */ 2459 power_state->v2.ucNumDPMLevels : 1),
2462 if (clock_array_index >= clock_info_array->ucNumEntries) 2460 GFP_KERNEL);
2463 continue; 2461 if (!rdev->pm.power_state[i].clock_info)
2464 clock_info = (union pplib_clock_info *) 2462 return state_index;
2465 &clock_info_array->clockInfo[clock_array_index]; 2463 if (power_state->v2.ucNumDPMLevels) {
2466 valid = radeon_atombios_parse_pplib_clock_info(rdev, 2464 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2467 state_index, mode_index, 2465 clock_array_index = power_state->v2.clockInfoIndex[j];
2468 clock_info); 2466 /* XXX this might be an inagua bug... */
2469 if (valid) 2467 if (clock_array_index >= clock_info_array->ucNumEntries)
2470 mode_index++; 2468 continue;
2469 clock_info = (union pplib_clock_info *)
2470 &clock_info_array->clockInfo[clock_array_index];
2471 valid = radeon_atombios_parse_pplib_clock_info(rdev,
2472 state_index, mode_index,
2473 clock_info);
2474 if (valid)
2475 mode_index++;
2476 }
2477 } else {
2478 rdev->pm.power_state[state_index].clock_info[0].mclk =
2479 rdev->clock.default_mclk;
2480 rdev->pm.power_state[state_index].clock_info[0].sclk =
2481 rdev->clock.default_sclk;
2482 mode_index++;
2471 } 2483 }
2472 rdev->pm.power_state[state_index].num_clock_modes = mode_index; 2484 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
2473 if (mode_index) { 2485 if (mode_index) {
@@ -2524,19 +2536,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2524 } else { 2536 } else {
2525 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); 2537 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
2526 if (rdev->pm.power_state) { 2538 if (rdev->pm.power_state) {
2527 /* add the default mode */ 2539 rdev->pm.power_state[0].clock_info =
2528 rdev->pm.power_state[state_index].type = 2540 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2529 POWER_STATE_TYPE_DEFAULT; 2541 if (rdev->pm.power_state[0].clock_info) {
2530 rdev->pm.power_state[state_index].num_clock_modes = 1; 2542 /* add the default mode */
2531 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; 2543 rdev->pm.power_state[state_index].type =
2532 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; 2544 POWER_STATE_TYPE_DEFAULT;
2533 rdev->pm.power_state[state_index].default_clock_mode = 2545 rdev->pm.power_state[state_index].num_clock_modes = 1;
2534 &rdev->pm.power_state[state_index].clock_info[0]; 2546 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
2535 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 2547 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
2536 rdev->pm.power_state[state_index].pcie_lanes = 16; 2548 rdev->pm.power_state[state_index].default_clock_mode =
2537 rdev->pm.default_power_state_index = state_index; 2549 &rdev->pm.power_state[state_index].clock_info[0];
2538 rdev->pm.power_state[state_index].flags = 0; 2550 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2539 state_index++; 2551 rdev->pm.power_state[state_index].pcie_lanes = 16;
2552 rdev->pm.default_power_state_index = state_index;
2553 rdev->pm.power_state[state_index].flags = 0;
2554 state_index++;
2555 }
2540 } 2556 }
2541 } 2557 }
2542 2558
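The power-table hunks above all replace the old fixed clock_info[8] array (dropped from struct radeon_power_state in the radeon.h hunk) with a per-state kzalloc'd array, always sized for at least one entry so a table with zero clock modes can still fall back to the default clocks. A condensed, paraphrased sketch of the pattern from the table_6 parser:

int n = power_state->v2.ucNumDPMLevels ? power_state->v2.ucNumDPMLevels : 1;

/* One clock_info array per power state, at least one entry long. */
rdev->pm.power_state[i].clock_info =
        kzalloc(n * sizeof(struct radeon_pm_clock_info), GFP_KERNEL);
if (!rdev->pm.power_state[i].clock_info)
        return state_index;

if (!power_state->v2.ucNumDPMLevels) {
        /* Empty table entry: fall back to the BIOS default clocks. */
        rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
        rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
        mode_index++;
}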
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 5cafc90de7f8..17e1a9b2d8fb 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -98,7 +98,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
98 struct radeon_bo *sobj = NULL; 98 struct radeon_bo *sobj = NULL;
99 uint64_t saddr, daddr; 99 uint64_t saddr, daddr;
100 int r, n; 100 int r, n;
101 unsigned int time; 101 int time;
102 102
103 n = RADEON_BENCHMARK_ITERATIONS; 103 n = RADEON_BENCHMARK_ITERATIONS;
104 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); 104 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 8bf83c4b4147..81fc100be7e1 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2563,14 +2563,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
2563 2563
2564 /* allocate 2 power states */ 2564 /* allocate 2 power states */
2565 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); 2565 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
2566 if (!rdev->pm.power_state) { 2566 if (rdev->pm.power_state) {
2567 rdev->pm.default_power_state_index = state_index; 2567 /* allocate 1 clock mode per state */
2568 rdev->pm.num_power_states = 0; 2568 rdev->pm.power_state[0].clock_info =
2569 2569 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2570 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; 2570 rdev->pm.power_state[1].clock_info =
2571 rdev->pm.current_clock_mode_index = 0; 2571 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2572 return; 2572 if (!rdev->pm.power_state[0].clock_info ||
2573 } 2573 !rdev->pm.power_state[1].clock_info)
2574 goto pm_failed;
2575 } else
2576 goto pm_failed;
2574 2577
2575 /* check for a thermal chip */ 2578 /* check for a thermal chip */
2576 offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); 2579 offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
@@ -2735,6 +2738,14 @@ default_mode:
2735 2738
2736 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; 2739 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
2737 rdev->pm.current_clock_mode_index = 0; 2740 rdev->pm.current_clock_mode_index = 0;
2741 return;
2742
2743pm_failed:
2744 rdev->pm.default_power_state_index = state_index;
2745 rdev->pm.num_power_states = 0;
2746
2747 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
2748 rdev->pm.current_clock_mode_index = 0;
2738} 2749}
2739 2750
2740void radeon_external_tmds_setup(struct drm_encoder *encoder) 2751void radeon_external_tmds_setup(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index fae00c0d75aa..29afd71e0840 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -93,7 +93,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
93{ 93{
94 struct drm_radeon_cs *cs = data; 94 struct drm_radeon_cs *cs = data;
95 uint64_t *chunk_array_ptr; 95 uint64_t *chunk_array_ptr;
96 unsigned size, i; 96 unsigned size, i, flags = 0;
97 97
98 if (!cs->num_chunks) { 98 if (!cs->num_chunks) {
99 return 0; 99 return 0;
@@ -140,6 +140,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
140 if (p->chunks[i].length_dw == 0) 140 if (p->chunks[i].length_dw == 0)
141 return -EINVAL; 141 return -EINVAL;
142 } 142 }
143 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
144 !p->chunks[i].length_dw) {
145 return -EINVAL;
146 }
143 147
144 p->chunks[i].length_dw = user_chunk.length_dw; 148 p->chunks[i].length_dw = user_chunk.length_dw;
145 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; 149 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -155,6 +159,9 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
155 p->chunks[i].user_ptr, size)) { 159 p->chunks[i].user_ptr, size)) {
156 return -EFAULT; 160 return -EFAULT;
157 } 161 }
162 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
163 flags = p->chunks[i].kdata[0];
164 }
158 } else { 165 } else {
159 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); 166 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
160 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); 167 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -174,6 +181,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
174 p->chunks[p->chunk_ib_idx].length_dw); 181 p->chunks[p->chunk_ib_idx].length_dw);
175 return -EINVAL; 182 return -EINVAL;
176 } 183 }
184
185 p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
177 return 0; 186 return 0;
178} 187}
179 188
@@ -222,7 +231,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
222 struct radeon_cs_chunk *ib_chunk; 231 struct radeon_cs_chunk *ib_chunk;
223 int r; 232 int r;
224 233
225 mutex_lock(&rdev->cs_mutex); 234 radeon_mutex_lock(&rdev->cs_mutex);
226 /* initialize parser */ 235 /* initialize parser */
227 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 236 memset(&parser, 0, sizeof(struct radeon_cs_parser));
228 parser.filp = filp; 237 parser.filp = filp;
@@ -233,14 +242,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
233 if (r) { 242 if (r) {
234 DRM_ERROR("Failed to initialize parser !\n"); 243 DRM_ERROR("Failed to initialize parser !\n");
235 radeon_cs_parser_fini(&parser, r); 244 radeon_cs_parser_fini(&parser, r);
236 mutex_unlock(&rdev->cs_mutex); 245 radeon_mutex_unlock(&rdev->cs_mutex);
237 return r; 246 return r;
238 } 247 }
239 r = radeon_ib_get(rdev, &parser.ib); 248 r = radeon_ib_get(rdev, &parser.ib);
240 if (r) { 249 if (r) {
241 DRM_ERROR("Failed to get ib !\n"); 250 DRM_ERROR("Failed to get ib !\n");
242 radeon_cs_parser_fini(&parser, r); 251 radeon_cs_parser_fini(&parser, r);
243 mutex_unlock(&rdev->cs_mutex); 252 radeon_mutex_unlock(&rdev->cs_mutex);
244 return r; 253 return r;
245 } 254 }
246 r = radeon_cs_parser_relocs(&parser); 255 r = radeon_cs_parser_relocs(&parser);
@@ -248,7 +257,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
248 if (r != -ERESTARTSYS) 257 if (r != -ERESTARTSYS)
249 DRM_ERROR("Failed to parse relocation %d!\n", r); 258 DRM_ERROR("Failed to parse relocation %d!\n", r);
250 radeon_cs_parser_fini(&parser, r); 259 radeon_cs_parser_fini(&parser, r);
251 mutex_unlock(&rdev->cs_mutex); 260 radeon_mutex_unlock(&rdev->cs_mutex);
252 return r; 261 return r;
253 } 262 }
254 /* Copy the packet into the IB, the parser will read from the 263 /* Copy the packet into the IB, the parser will read from the
@@ -260,14 +269,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
260 if (r || parser.parser_error) { 269 if (r || parser.parser_error) {
261 DRM_ERROR("Invalid command stream !\n"); 270 DRM_ERROR("Invalid command stream !\n");
262 radeon_cs_parser_fini(&parser, r); 271 radeon_cs_parser_fini(&parser, r);
263 mutex_unlock(&rdev->cs_mutex); 272 radeon_mutex_unlock(&rdev->cs_mutex);
264 return r; 273 return r;
265 } 274 }
266 r = radeon_cs_finish_pages(&parser); 275 r = radeon_cs_finish_pages(&parser);
267 if (r) { 276 if (r) {
268 DRM_ERROR("Invalid command stream !\n"); 277 DRM_ERROR("Invalid command stream !\n");
269 radeon_cs_parser_fini(&parser, r); 278 radeon_cs_parser_fini(&parser, r);
270 mutex_unlock(&rdev->cs_mutex); 279 radeon_mutex_unlock(&rdev->cs_mutex);
271 return r; 280 return r;
272 } 281 }
273 r = radeon_ib_schedule(rdev, parser.ib); 282 r = radeon_ib_schedule(rdev, parser.ib);
@@ -275,7 +284,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
275 DRM_ERROR("Failed to schedule IB !\n"); 284 DRM_ERROR("Failed to schedule IB !\n");
276 } 285 }
277 radeon_cs_parser_fini(&parser, r); 286 radeon_cs_parser_fini(&parser, r);
278 mutex_unlock(&rdev->cs_mutex); 287 radeon_mutex_unlock(&rdev->cs_mutex);
279 return r; 288 return r;
280} 289}
281 290
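The radeon_cs.c hunks above add the kernel side of a new submission flag: an optional RADEON_CHUNK_ID_FLAGS chunk whose first dword may carry RADEON_CS_KEEP_TILING_FLAGS, recorded in the parser and honoured by the CS checkers patched earlier (r300, r600); the interface bump to 2.12.0 in radeon_drv.c below advertises it. A condensed, non-verbatim sketch of that plumbing:

unsigned flags = 0;

/* A FLAGS chunk must not be empty. */
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS && !p->chunks[i].length_dw)
        return -EINVAL;

/* The first dword of the FLAGS chunk carries the CS flags. */
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)
        flags = p->chunks[i].kdata[0];

p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;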
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7c31321df45b..fb347a80486f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev,
716 716
717 /* mutex initialization are all done here so we 717 /* mutex initialization are all done here so we
718 * can recall function without having locking issues */ 718 * can recall function without having locking issues */
719 mutex_init(&rdev->cs_mutex); 719 radeon_mutex_init(&rdev->cs_mutex);
720 mutex_init(&rdev->ib_pool.mutex); 720 mutex_init(&rdev->ib_pool.mutex);
721 mutex_init(&rdev->cp.mutex); 721 mutex_init(&rdev->cp.mutex);
722 mutex_init(&rdev->dc_hw_i2c_mutex); 722 mutex_init(&rdev->dc_hw_i2c_mutex);
@@ -961,6 +961,9 @@ int radeon_gpu_reset(struct radeon_device *rdev)
961 int r; 961 int r;
962 int resched; 962 int resched;
963 963
964 /* Prevent CS ioctl from interfering */
965 radeon_mutex_lock(&rdev->cs_mutex);
966
964 radeon_save_bios_scratch_regs(rdev); 967 radeon_save_bios_scratch_regs(rdev);
965 /* block TTM */ 968 /* block TTM */
966 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 969 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -973,10 +976,15 @@ int radeon_gpu_reset(struct radeon_device *rdev)
973 radeon_restore_bios_scratch_regs(rdev); 976 radeon_restore_bios_scratch_regs(rdev);
974 drm_helper_resume_force_mode(rdev->ddev); 977 drm_helper_resume_force_mode(rdev->ddev);
975 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 978 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
976 return 0;
977 } 979 }
978 /* bad news, how to tell it to userspace ? */ 980
979 dev_info(rdev->dev, "GPU reset failed\n"); 981 radeon_mutex_unlock(&rdev->cs_mutex);
982
983 if (r) {
984 /* bad news, how to tell it to userspace ? */
985 dev_info(rdev->dev, "GPU reset failed\n");
986 }
987
980 return r; 988 return r;
981} 989}
982 990
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e42c34b98c7b..c3ef1d266f88 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -53,9 +53,10 @@
53 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query 53 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
54 * 2.10.0 - fusion 2D tiling 54 * 2.10.0 - fusion 2D tiling
55 * 2.11.0 - backend map, initial compute support for the CS checker 55 * 2.11.0 - backend map, initial compute support for the CS checker
56 * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
56 */ 57 */
57#define KMS_DRIVER_MAJOR 2 58#define KMS_DRIVER_MAJOR 2
58#define KMS_DRIVER_MINOR 11 59#define KMS_DRIVER_MINOR 12
59#define KMS_DRIVER_PATCHLEVEL 0 60#define KMS_DRIVER_PATCHLEVEL 0
60int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 61int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
61int radeon_driver_unload_kms(struct drm_device *dev); 62int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 06e413e6a920..4b27efa4405b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
233 switch (radeon_encoder->encoder_id) { 233 switch (radeon_encoder->encoder_id) {
234 case ENCODER_OBJECT_ID_TRAVIS: 234 case ENCODER_OBJECT_ID_TRAVIS:
235 case ENCODER_OBJECT_ID_NUTMEG: 235 case ENCODER_OBJECT_ID_NUTMEG:
236 return true; 236 return radeon_encoder->encoder_id;
237 default: 237 default:
238 return false; 238 return ENCODER_OBJECT_ID_NONE;
239 } 239 }
240 } 240 }
241 241 return ENCODER_OBJECT_ID_NONE;
242 return false;
243} 242}
244 243
245void radeon_panel_mode_fixup(struct drm_encoder *encoder, 244void radeon_panel_mode_fixup(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 95b93604b679..25a19c483075 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -991,12 +991,6 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
991 struct drm_display_mode *mode, 991 struct drm_display_mode *mode,
992 struct drm_display_mode *adjusted_mode) 992 struct drm_display_mode *adjusted_mode)
993{ 993{
994 struct drm_device *dev = crtc->dev;
995 struct radeon_device *rdev = dev->dev_private;
996
997 /* adjust pm to upcoming mode change */
998 radeon_pm_compute_clocks(rdev);
999
1000 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 994 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
1001 return false; 995 return false;
1002 return true; 996 return true;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6fabe89fa6a1..78a665bd9519 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -53,6 +53,24 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev);
53 53
54#define ACPI_AC_CLASS "ac_adapter" 54#define ACPI_AC_CLASS "ac_adapter"
55 55
56int radeon_pm_get_type_index(struct radeon_device *rdev,
57 enum radeon_pm_state_type ps_type,
58 int instance)
59{
60 int i;
61 int found_instance = -1;
62
63 for (i = 0; i < rdev->pm.num_power_states; i++) {
64 if (rdev->pm.power_state[i].type == ps_type) {
65 found_instance++;
66 if (found_instance == instance)
67 return i;
68 }
69 }
70 /* return default if no match */
71 return rdev->pm.default_power_state_index;
72}
73
56#ifdef CONFIG_ACPI 74#ifdef CONFIG_ACPI
57static int radeon_acpi_event(struct notifier_block *nb, 75static int radeon_acpi_event(struct notifier_block *nb,
58 unsigned long val, 76 unsigned long val,
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 481b99e89f65..b1053d640423 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
62{ 62{
63 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 63 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
64 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); 64 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
65 int i;
65 66
66 /* Lock the graphics update lock */ 67 /* Lock the graphics update lock */
67 tmp |= AVIVO_D1GRPH_UPDATE_LOCK; 68 tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
74 (u32)crtc_base); 75 (u32)crtc_base);
75 76
76 /* Wait for update_pending to go high. */ 77 /* Wait for update_pending to go high. */
77 while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); 78 for (i = 0; i < rdev->usec_timeout; i++) {
79 if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
80 break;
81 udelay(1);
82 }
78 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 83 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
79 84
80 /* Unlock the lock, so double-buffering can take place inside vblank */ 85 /* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a983f410ab89..23ae1c60ab3d 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
47{ 47{
48 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 48 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
49 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); 49 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
50 int i;
50 51
51 /* Lock the graphics update lock */ 52 /* Lock the graphics update lock */
52 tmp |= AVIVO_D1GRPH_UPDATE_LOCK; 53 tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
66 (u32)crtc_base); 67 (u32)crtc_base);
67 68
68 /* Wait for update_pending to go high. */ 69 /* Wait for update_pending to go high. */
69 while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); 70 for (i = 0; i < rdev->usec_timeout; i++) {
71 if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
72 break;
73 udelay(1);
74 }
70 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 75 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
71 76
72 /* Unlock the lock, so double-buffering can take place inside vblank */ 77 /* Unlock the lock, so double-buffering can take place inside vblank */
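The two page-flip hunks above (rs600 and rv770) replace an unbounded busy-wait on a status register with a bounded poll. A minimal sketch of the pattern, assuming hypothetical read_status()/STATUS_READY/TIMEOUT_US names:

static int wait_for_ready(void)
{
	unsigned int i;

	for (i = 0; i < TIMEOUT_US; i++) {
		if (read_status() & STATUS_READY)
			return 0;		/* condition met in time */
		udelay(1);			/* poll roughly once per microsecond */
	}
	return -ETIMEDOUT;			/* bail out instead of hanging the CPU */
}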
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 119b6e3ff906..2f0eab66ece6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -562,10 +562,16 @@ retry:
562 return ret; 562 return ret;
563 563
564 spin_lock(&glob->lru_lock); 564 spin_lock(&glob->lru_lock);
565
566 if (unlikely(list_empty(&bo->ddestroy))) {
567 spin_unlock(&glob->lru_lock);
568 return 0;
569 }
570
565 ret = ttm_bo_reserve_locked(bo, interruptible, 571 ret = ttm_bo_reserve_locked(bo, interruptible,
566 no_wait_reserve, false, 0); 572 no_wait_reserve, false, 0);
567 573
568 if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) { 574 if (unlikely(ret != 0)) {
569 spin_unlock(&glob->lru_lock); 575 spin_unlock(&glob->lru_lock);
570 return ret; 576 return ret;
571 } 577 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 3f6343502d1f..5ff561d4e0b4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -140,7 +140,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
140 goto out_clips; 140 goto out_clips;
141 } 141 }
142 142
143 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); 143 clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
144 if (clips == NULL) { 144 if (clips == NULL) {
145 DRM_ERROR("Failed to allocate clip rect list.\n"); 145 DRM_ERROR("Failed to allocate clip rect list.\n");
146 ret = -ENOMEM; 146 ret = -ENOMEM;
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
232 goto out_clips; 232 goto out_clips;
233 } 233 }
234 234
235 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); 235 clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
236 if (clips == NULL) { 236 if (clips == NULL) {
237 DRM_ERROR("Failed to allocate clip rect list.\n"); 237 DRM_ERROR("Failed to allocate clip rect list.\n");
238 ret = -ENOMEM; 238 ret = -ENOMEM;
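Both hunks above swap kzalloc(n * size) for kcalloc(n, size). Sketch of why, with a hypothetical alloc_clips() wrapper: the open-coded multiplication can wrap around for a large, user-supplied num_clips, while kcalloc() returns NULL on multiplication overflow.

static struct drm_clip_rect *alloc_clips(unsigned int num_clips)
{
	/* kzalloc(num_clips * sizeof(*clips), GFP_KERNEL) could overflow
	 * and hand back a buffer smaller than the caller expects. */
	return kcalloc(num_clips, sizeof(struct drm_clip_rect), GFP_KERNEL);
}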
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 0585987f2945..1748a7142aca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -105,6 +105,10 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
105 struct vmw_dma_buffer *dmabuf = NULL; 105 struct vmw_dma_buffer *dmabuf = NULL;
106 int ret; 106 int ret;
107 107
108 /* A lot of the code assumes this */
109 if (handle && (width != 64 || height != 64))
110 return -EINVAL;
111
108 if (handle) { 112 if (handle) {
109 ret = vmw_user_surface_lookup_handle(dev_priv, tfile, 113 ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
110 handle, &surface); 114 handle, &surface);
@@ -410,8 +414,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
410 top = clips->y1; 414 top = clips->y1;
411 bottom = clips->y2; 415 bottom = clips->y2;
412 416
413 clips_ptr = clips; 417 /* skip the first clip rect */
414 for (i = 1; i < num_clips; i++, clips_ptr += inc) { 418 for (i = 1, clips_ptr = clips + inc;
419 i < num_clips; i++, clips_ptr += inc) {
415 left = min_t(int, left, (int)clips_ptr->x1); 420 left = min_t(int, left, (int)clips_ptr->x1);
416 right = max_t(int, right, (int)clips_ptr->x2); 421 right = max_t(int, right, (int)clips_ptr->x2);
417 top = min_t(int, top, (int)clips_ptr->y1); 422 top = min_t(int, top, (int)clips_ptr->y1);
@@ -1331,7 +1336,10 @@ int vmw_kms_close(struct vmw_private *dev_priv)
1331 * drm_encoder_cleanup which takes the lock we deadlock. 1336 * drm_encoder_cleanup which takes the lock we deadlock.
1332 */ 1337 */
1333 drm_mode_config_cleanup(dev_priv->dev); 1338 drm_mode_config_cleanup(dev_priv->dev);
1334 vmw_kms_close_legacy_display_system(dev_priv); 1339 if (dev_priv->sou_priv)
1340 vmw_kms_close_screen_object_display(dev_priv);
1341 else
1342 vmw_kms_close_legacy_display_system(dev_priv);
1335 return 0; 1343 return 0;
1336} 1344}
1337 1345
@@ -1809,7 +1817,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1809 } 1817 }
1810 1818
1811 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); 1819 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
1812 rects = kzalloc(rects_size, GFP_KERNEL); 1820 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
1821 GFP_KERNEL);
1813 if (unlikely(!rects)) { 1822 if (unlikely(!rects)) {
1814 ret = -ENOMEM; 1823 ret = -ENOMEM;
1815 goto out_unlock; 1824 goto out_unlock;
@@ -1824,10 +1833,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1824 } 1833 }
1825 1834
1826 for (i = 0; i < arg->num_outputs; ++i) { 1835 for (i = 0; i < arg->num_outputs; ++i) {
1827 if (rects->x < 0 || 1836 if (rects[i].x < 0 ||
1828 rects->y < 0 || 1837 rects[i].y < 0 ||
1829 rects->x + rects->w > mode_config->max_width || 1838 rects[i].x + rects[i].w > mode_config->max_width ||
1830 rects->y + rects->h > mode_config->max_height) { 1839 rects[i].y + rects[i].h > mode_config->max_height) {
1831 DRM_ERROR("Invalid GUI layout.\n"); 1840 DRM_ERROR("Invalid GUI layout.\n");
1832 ret = -EINVAL; 1841 ret = -EINVAL;
1833 goto out_free; 1842 goto out_free;
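The last hunk fixes a loop that re-validated rects->x (element 0) on every pass. Minimal shape of the corrected check; num and the max_width/max_height bounds are placeholders:

	for (i = 0; i < num; ++i) {
		if (rects[i].x < 0 || rects[i].y < 0 ||
		    rects[i].x + rects[i].w > max_width ||
		    rects[i].y + rects[i].h > max_height)
			return -EINVAL;		/* reject the i-th output's rectangle */
	}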
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c72f1c0b5e63..111d956d8e7d 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -465,31 +465,29 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
465 while (new_bus) { 465 while (new_bus) {
466 new_bridge = new_bus->self; 466 new_bridge = new_bus->self;
467 467
468 if (new_bridge) { 468 /* go through list of devices already registered */
469 /* go through list of devices already registered */ 469 list_for_each_entry(same_bridge_vgadev, &vga_list, list) {
470 list_for_each_entry(same_bridge_vgadev, &vga_list, list) { 470 bus = same_bridge_vgadev->pdev->bus;
471 bus = same_bridge_vgadev->pdev->bus; 471 bridge = bus->self;
472 bridge = bus->self; 472
473 473 /* see if the share a bridge with this device */
474 /* see if the share a bridge with this device */ 474 if (new_bridge == bridge) {
475 if (new_bridge == bridge) { 475 /* if their direct parent bridge is the same
476 /* if their direct parent bridge is the same 476 as any bridge of this device then it can't be used
477 as any bridge of this device then it can't be used 477 for that device */
478 for that device */ 478 same_bridge_vgadev->bridge_has_one_vga = false;
479 same_bridge_vgadev->bridge_has_one_vga = false; 479 }
480 }
481 480
482 /* now iterate the previous devices bridge hierarchy */ 481 /* now iterate the previous devices bridge hierarchy */
483 /* if the new devices parent bridge is in the other devices 482 /* if the new devices parent bridge is in the other devices
484 hierarchy then we can't use it to control this device */ 483 hierarchy then we can't use it to control this device */
485 while (bus) { 484 while (bus) {
486 bridge = bus->self; 485 bridge = bus->self;
487 if (bridge) { 486 if (bridge) {
488 if (bridge == vgadev->pdev->bus->self) 487 if (bridge == vgadev->pdev->bus->self)
489 vgadev->bridge_has_one_vga = false; 488 vgadev->bridge_has_one_vga = false;
490 }
491 bus = bus->parent;
492 } 489 }
490 bus = bus->parent;
493 } 491 }
494 } 492 }
495 new_bus = new_bus->parent; 493 new_bus = new_bus->parent;
@@ -993,14 +991,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
993 uc = &priv->cards[i]; 991 uc = &priv->cards[i];
994 } 992 }
995 993
996 if (!uc) 994 if (!uc) {
997 return -EINVAL; 995 ret_val = -EINVAL;
996 goto done;
997 }
998 998
999 if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) 999 if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
1000 return -EINVAL; 1000 ret_val = -EINVAL;
1001 goto done;
1002 }
1001 1003
1002 if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) 1004 if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
1003 return -EINVAL; 1005 ret_val = -EINVAL;
1006 goto done;
1007 }
1004 1008
1005 vga_put(pdev, io_state); 1009 vga_put(pdev, io_state);
1006 1010
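The vga_arb_write() hunk converts early returns into goto done so the function keeps a single exit path. A stripped-down sketch of that shape (struct and names are illustrative, not the real function):

static ssize_t do_write(struct vga_arb_user_card *uc, size_t count)
{
	ssize_t ret_val;

	if (!uc) {
		ret_val = -EINVAL;
		goto done;		/* fail, but still reach the common exit */
	}
	ret_val = count;		/* success: report bytes consumed */
done:
	/* cleanup shared by every exit path lives here exactly once */
	return ret_val;
}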
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 848a56c0279c..af353842f75f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1771,8 +1771,8 @@ static const struct hid_device_id hid_ignore_list[] = {
1771 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, 1771 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
1772 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, 1772 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
1773 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, 1773 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
1774 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
1774 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) }, 1775 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
1775 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
1776 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) }, 1776 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
1777 { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) }, 1777 { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
1778 { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) }, 1778 { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 06ce996b8b65..4a441a6f9967 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -266,7 +266,7 @@
266#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002 266#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
267 267
268#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc 268#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
269#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001 269#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
270 270
271#define USB_VENDOR_ID_GLAB 0x06c2 271#define USB_VENDOR_ID_GLAB 0x06c2
272#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038 272#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9ec854ae118b..91be41f60809 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -315,7 +315,7 @@ config SENSORS_DS1621
315 315
316config SENSORS_EXYNOS4_TMU 316config SENSORS_EXYNOS4_TMU
317 tristate "Temperature sensor on Samsung EXYNOS4" 317 tristate "Temperature sensor on Samsung EXYNOS4"
318 depends on EXYNOS4_DEV_TMU 318 depends on ARCH_EXYNOS4
319 help 319 help
320 If you say yes here you get support for TMU (Thermal Managment 320 If you say yes here you get support for TMU (Thermal Managment
321 Unit) on SAMSUNG EXYNOS4 series of SoC. 321 Unit) on SAMSUNG EXYNOS4 series of SoC.
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 318e38e85376..5d760f3d21c2 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -160,7 +160,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id);
160static struct spi_driver ad7314_driver = { 160static struct spi_driver ad7314_driver = {
161 .driver = { 161 .driver = {
162 .name = "ad7314", 162 .name = "ad7314",
163 .bus = &spi_bus_type,
164 .owner = THIS_MODULE, 163 .owner = THIS_MODULE,
165 }, 164 },
166 .probe = ad7314_probe, 165 .probe = ad7314_probe,
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 52319340e182..04450f8bf5da 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -227,7 +227,6 @@ static int __devexit ads7871_remove(struct spi_device *spi)
227static struct spi_driver ads7871_driver = { 227static struct spi_driver ads7871_driver = {
228 .driver = { 228 .driver = {
229 .name = DEVICE_NAME, 229 .name = DEVICE_NAME,
230 .bus = &spi_bus_type,
231 .owner = THIS_MODULE, 230 .owner = THIS_MODULE,
232 }, 231 },
233 232
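The ad7314 and ads7871 hunks drop .bus = &spi_bus_type because spi_register_driver() already assigns the bus, making the field redundant. A minimal spi_driver after the cleanup (the foo_* names are hypothetical):

static struct spi_driver foo_driver = {
	.driver = {
		.name  = "foo",
		.owner = THIS_MODULE,
	},
	.probe  = foo_probe,
	.remove = foo_remove,
};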
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
index faa0884f61f6..f2359a0093bd 100644
--- a/drivers/hwmon/exynos4_tmu.c
+++ b/drivers/hwmon/exynos4_tmu.c
@@ -506,17 +506,7 @@ static struct platform_driver exynos4_tmu_driver = {
506 .resume = exynos4_tmu_resume, 506 .resume = exynos4_tmu_resume,
507}; 507};
508 508
509static int __init exynos4_tmu_driver_init(void) 509module_platform_driver(exynos4_tmu_driver);
510{
511 return platform_driver_register(&exynos4_tmu_driver);
512}
513module_init(exynos4_tmu_driver_init);
514
515static void __exit exynos4_tmu_driver_exit(void)
516{
517 platform_driver_unregister(&exynos4_tmu_driver);
518}
519module_exit(exynos4_tmu_driver_exit);
520 510
521MODULE_DESCRIPTION("EXYNOS4 TMU Driver"); 511MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
522MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>"); 512MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 89aa9fb743af..9ba38f318ffb 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -539,18 +539,7 @@ static struct platform_driver gpio_fan_driver = {
539 }, 539 },
540}; 540};
541 541
542static int __init gpio_fan_init(void) 542module_platform_driver(gpio_fan_driver);
543{
544 return platform_driver_register(&gpio_fan_driver);
545}
546
547static void __exit gpio_fan_exit(void)
548{
549 platform_driver_unregister(&gpio_fan_driver);
550}
551
552module_init(gpio_fan_init);
553module_exit(gpio_fan_exit);
554 543
555MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>"); 544MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
556MODULE_DESCRIPTION("GPIO FAN driver"); 545MODULE_DESCRIPTION("GPIO FAN driver");
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index fea292d43407..5253d23361d9 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
59{ 59{
60 struct jz4740_hwmon *hwmon = dev_get_drvdata(dev); 60 struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
61 struct completion *completion = &hwmon->read_completion; 61 struct completion *completion = &hwmon->read_completion;
62 unsigned long t; 62 long t;
63 unsigned long val; 63 unsigned long val;
64 int ret; 64 int ret;
65 65
@@ -203,7 +203,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
203 return 0; 203 return 0;
204} 204}
205 205
206struct platform_driver jz4740_hwmon_driver = { 206static struct platform_driver jz4740_hwmon_driver = {
207 .probe = jz4740_hwmon_probe, 207 .probe = jz4740_hwmon_probe,
208 .remove = __devexit_p(jz4740_hwmon_remove), 208 .remove = __devexit_p(jz4740_hwmon_remove),
209 .driver = { 209 .driver = {
@@ -212,17 +212,7 @@ struct platform_driver jz4740_hwmon_driver = {
212 }, 212 },
213}; 213};
214 214
215static int __init jz4740_hwmon_init(void) 215module_platform_driver(jz4740_hwmon_driver);
216{
217 return platform_driver_register(&jz4740_hwmon_driver);
218}
219module_init(jz4740_hwmon_init);
220
221static void __exit jz4740_hwmon_exit(void)
222{
223 platform_driver_unregister(&jz4740_hwmon_driver);
224}
225module_exit(jz4740_hwmon_exit);
226 216
227MODULE_DESCRIPTION("JZ4740 SoC HWMON driver"); 217MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
228MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); 218MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index eab11615dced..9b382ec2c3bd 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -432,19 +432,7 @@ static struct platform_driver ntc_thermistor_driver = {
432 .id_table = ntc_thermistor_id, 432 .id_table = ntc_thermistor_id,
433}; 433};
434 434
435static int __init ntc_thermistor_init(void) 435module_platform_driver(ntc_thermistor_driver);
436{
437 return platform_driver_register(&ntc_thermistor_driver);
438}
439
440module_init(ntc_thermistor_init);
441
442static void __exit ntc_thermistor_cleanup(void)
443{
444 platform_driver_unregister(&ntc_thermistor_driver);
445}
446
447module_exit(ntc_thermistor_cleanup);
448 436
449MODULE_DESCRIPTION("NTC Thermistor Driver"); 437MODULE_DESCRIPTION("NTC Thermistor Driver");
450MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); 438MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index b39f52e2752a..f6c26d19f521 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -393,18 +393,7 @@ static struct platform_driver s3c_hwmon_driver = {
393 .remove = __devexit_p(s3c_hwmon_remove), 393 .remove = __devexit_p(s3c_hwmon_remove),
394}; 394};
395 395
396static int __init s3c_hwmon_init(void) 396module_platform_driver(s3c_hwmon_driver);
397{
398 return platform_driver_register(&s3c_hwmon_driver);
399}
400
401static void __exit s3c_hwmon_exit(void)
402{
403 platform_driver_unregister(&s3c_hwmon_driver);
404}
405
406module_init(s3c_hwmon_init);
407module_exit(s3c_hwmon_exit);
408 397
409MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 398MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
410MODULE_DESCRIPTION("S3C ADC HWMon driver"); 399MODULE_DESCRIPTION("S3C ADC HWMon driver");
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index e3b5c6039c25..79b6dabe3161 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -590,19 +590,8 @@ static struct platform_driver sch5627_driver = {
590 .remove = sch5627_remove, 590 .remove = sch5627_remove,
591}; 591};
592 592
593static int __init sch5627_init(void) 593module_platform_driver(sch5627_driver);
594{
595 return platform_driver_register(&sch5627_driver);
596}
597
598static void __exit sch5627_exit(void)
599{
600 platform_driver_unregister(&sch5627_driver);
601}
602 594
603MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver"); 595MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
604MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); 596MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
605MODULE_LICENSE("GPL"); 597MODULE_LICENSE("GPL");
606
607module_init(sch5627_init);
608module_exit(sch5627_exit);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 244407aa79fc..9d5236fb09b4 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -521,19 +521,8 @@ static struct platform_driver sch5636_driver = {
521 .remove = sch5636_remove, 521 .remove = sch5636_remove,
522}; 522};
523 523
524static int __init sch5636_init(void) 524module_platform_driver(sch5636_driver);
525{
526 return platform_driver_register(&sch5636_driver);
527}
528
529static void __exit sch5636_exit(void)
530{
531 platform_driver_unregister(&sch5636_driver);
532}
533 525
534MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver"); 526MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver");
535MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); 527MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
536MODULE_LICENSE("GPL"); 528MODULE_LICENSE("GPL");
537
538module_init(sch5636_init);
539module_exit(sch5636_exit);
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 57240740b161..0018c7dd0097 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -136,19 +136,7 @@ static struct platform_driver twl4030_madc_hwmon_driver = {
136 }, 136 },
137}; 137};
138 138
139static int __init twl4030_madc_hwmon_init(void) 139module_platform_driver(twl4030_madc_hwmon_driver);
140{
141 return platform_driver_register(&twl4030_madc_hwmon_driver);
142}
143
144module_init(twl4030_madc_hwmon_init);
145
146static void __exit twl4030_madc_hwmon_exit(void)
147{
148 platform_driver_unregister(&twl4030_madc_hwmon_driver);
149}
150
151module_exit(twl4030_madc_hwmon_exit);
152 140
153MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver"); 141MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
154MODULE_LICENSE("GPL"); 142MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 3cd07bf42dca..b9a87e89bab4 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -309,15 +309,4 @@ static struct platform_driver env_driver = {
309 .remove = __devexit_p(env_remove), 309 .remove = __devexit_p(env_remove),
310}; 310};
311 311
312static int __init env_init(void) 312module_platform_driver(env_driver);
313{
314 return platform_driver_register(&env_driver);
315}
316
317static void __exit env_exit(void)
318{
319 platform_driver_unregister(&env_driver);
320}
321
322module_init(env_init);
323module_exit(env_exit);
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c
index 97b1f834a471..9b598ed26020 100644
--- a/drivers/hwmon/wm831x-hwmon.c
+++ b/drivers/hwmon/wm831x-hwmon.c
@@ -209,17 +209,7 @@ static struct platform_driver wm831x_hwmon_driver = {
209 }, 209 },
210}; 210};
211 211
212static int __init wm831x_hwmon_init(void) 212module_platform_driver(wm831x_hwmon_driver);
213{
214 return platform_driver_register(&wm831x_hwmon_driver);
215}
216module_init(wm831x_hwmon_init);
217
218static void __exit wm831x_hwmon_exit(void)
219{
220 platform_driver_unregister(&wm831x_hwmon_driver);
221}
222module_exit(wm831x_hwmon_exit);
223 213
224MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); 214MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
225MODULE_DESCRIPTION("WM831x Hardware Monitoring"); 215MODULE_DESCRIPTION("WM831x Hardware Monitoring");
diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c
index 13290595ca86..3ff67edbdc44 100644
--- a/drivers/hwmon/wm8350-hwmon.c
+++ b/drivers/hwmon/wm8350-hwmon.c
@@ -133,17 +133,7 @@ static struct platform_driver wm8350_hwmon_driver = {
133 }, 133 },
134}; 134};
135 135
136static int __init wm8350_hwmon_init(void) 136module_platform_driver(wm8350_hwmon_driver);
137{
138 return platform_driver_register(&wm8350_hwmon_driver);
139}
140module_init(wm8350_hwmon_init);
141
142static void __exit wm8350_hwmon_exit(void)
143{
144 platform_driver_unregister(&wm8350_hwmon_driver);
145}
146module_exit(wm8350_hwmon_exit);
147 137
148MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); 138MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
149MODULE_DESCRIPTION("WM8350 Hardware Monitoring"); 139MODULE_DESCRIPTION("WM8350 Hardware Monitoring");
diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c
index 143461a95ae4..86980fe04117 100644
--- a/drivers/hwspinlock/u8500_hsem.c
+++ b/drivers/hwspinlock/u8500_hsem.c
@@ -21,6 +21,7 @@
21 * General Public License for more details. 21 * General Public License for more details.
22 */ 22 */
23 23
24#include <linux/module.h>
24#include <linux/delay.h> 25#include <linux/delay.h>
25#include <linux/io.h> 26#include <linux/io.h>
26#include <linux/pm_runtime.h> 27#include <linux/pm_runtime.h>
@@ -108,10 +109,8 @@ static int __devinit u8500_hsem_probe(struct platform_device *pdev)
108 return -ENODEV; 109 return -ENODEV;
109 110
110 io_base = ioremap(res->start, resource_size(res)); 111 io_base = ioremap(res->start, resource_size(res));
111 if (!io_base) { 112 if (!io_base)
112 ret = -ENOMEM; 113 return -ENOMEM;
113 goto free_state;
114 }
115 114
116 /* make sure protocol 1 is selected */ 115 /* make sure protocol 1 is selected */
117 val = readl(io_base + HSEM_CTRL_REG); 116 val = readl(io_base + HSEM_CTRL_REG);
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 85584a547c25..525c7345fa0b 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -488,7 +488,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
488 488
489 if (flags & I2C_M_TEN) { 489 if (flags & I2C_M_TEN) {
490 /* a ten bit address */ 490 /* a ten bit address */
491 addr = 0xf0 | ((msg->addr >> 7) & 0x03); 491 addr = 0xf0 | ((msg->addr >> 7) & 0x06);
492 bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr); 492 bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
493 /* try extended address code...*/ 493 /* try extended address code...*/
494 ret = try_address(i2c_adap, addr, retries); 494 ret = try_address(i2c_adap, addr, retries);
@@ -498,7 +498,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
498 return -ENXIO; 498 return -ENXIO;
499 } 499 }
500 /* the remaining 8 bit address */ 500 /* the remaining 8 bit address */
501 ret = i2c_outb(i2c_adap, msg->addr & 0x7f); 501 ret = i2c_outb(i2c_adap, msg->addr & 0xff);
502 if ((ret != 1) && !nak_ok) { 502 if ((ret != 1) && !nak_ok) {
503 /* the chip did not ack / xmission error occurred */ 503 /* the chip did not ack / xmission error occurred */
504 dev_err(&i2c_adap->dev, "died at 2nd address code\n"); 504 dev_err(&i2c_adap->dev, "died at 2nd address code\n");
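The i2c-algo-bit hunks correct the 10-bit addressing math: the first byte on the wire is 11110 A9 A8 R/W#, so after addr >> 7 the two high address bits must be masked with 0x06 (bit 0 stays 0 for a write), and the second byte carries all of A7..A0, hence & 0xff rather than & 0x7f. A small sketch of the byte composition (hypothetical helper, not from the patch):

static void ten_bit_addr_bytes(u16 addr, u8 *first, u8 *second)
{
	*first  = 0xf0 | ((addr >> 7) & 0x06);	/* 1111 0 A9 A8 0 (write) */
	*second = addr & 0xff;			/* A7..A0 */
}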
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 835e47b39bc2..03b615778887 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
593 i2c->adap.algo_data = i2c; 593 i2c->adap.algo_data = i2c;
594 i2c->adap.dev.parent = &pdev->dev; 594 i2c->adap.dev.parent = &pdev->dev;
595 595
596 mfp_set_groupg(&pdev->dev); 596 mfp_set_groupg(&pdev->dev, NULL);
597 597
598 clk_get_rate(i2c->clk); 598 clk_get_rate(i2c->clk);
599 599
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 131079a3e292..1e5606185b4f 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -539,8 +539,10 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
539 client->dev.type = &i2c_client_type; 539 client->dev.type = &i2c_client_type;
540 client->dev.of_node = info->of_node; 540 client->dev.of_node = info->of_node;
541 541
542 /* For 10-bit clients, add an arbitrary offset to avoid collisions */
542 dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), 543 dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
543 client->addr); 544 client->addr | ((client->flags & I2C_CLIENT_TEN)
545 ? 0xa000 : 0));
544 status = device_register(&client->dev); 546 status = device_register(&client->dev);
545 if (status) 547 if (status)
546 goto out_err; 548 goto out_err;
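Worked example of the hunk above: a 10-bit client at address 0x1a on adapter 0 is now named 0-a01a (0x001a | 0xa000), while a 7-bit client at the same 0x1a keeps 0-001a, so the two device names can no longer collide.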
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index c90ce50b619f..57a45ce84b2d 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -579,7 +579,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
579 return 0; 579 return 0;
580} 580}
581 581
582int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action, 582static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
583 void *data) 583 void *data)
584{ 584{
585 struct device *dev = data; 585 struct device *dev = data;
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index 67cbcfa35122..847553fd8b96 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer 2 * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer
3 * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator 3 * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator
4 * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz 4 * Copyright (C) 2007-2011 Bartlomiej Zolnierkiewicz
5 * 5 *
6 * CYPRESS CY82C693 chipset IDE controller 6 * CYPRESS CY82C693 chipset IDE controller
7 * 7 *
@@ -90,7 +90,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
90 u8 time_16, time_8; 90 u8 time_16, time_8;
91 91
92 /* select primary or secondary channel */ 92 /* select primary or secondary channel */
93 if (hwif->index > 0) { /* drive is on the secondary channel */ 93 if (drive->dn > 1) { /* drive is on the secondary channel */
94 dev = pci_get_slot(dev->bus, dev->devfn+1); 94 dev = pci_get_slot(dev->bus, dev->devfn+1);
95 if (!dev) { 95 if (!dev) {
96 printk(KERN_ERR "%s: tune_drive: " 96 printk(KERN_ERR "%s: tune_drive: "
@@ -141,7 +141,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
141 pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16); 141 pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
142 pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8); 142 pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
143 } 143 }
144 if (hwif->index > 0) 144 if (drive->dn > 1)
145 pci_dev_put(dev); 145 pci_dev_put(dev);
146} 146}
147 147
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 4a697a238e28..8716066a2f2b 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -521,8 +521,8 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
521 if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { 521 if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
522 d.init_dma = icside_dma_init; 522 d.init_dma = icside_dma_init;
523 d.port_ops = &icside_v6_port_ops; 523 d.port_ops = &icside_v6_port_ops;
524 } else
524 d.dma_ops = NULL; 525 d.dma_ops = NULL;
525 }
526 526
527 ret = ide_host_register(host, &d, hws); 527 ret = ide_host_register(host, &d, hws);
528 if (ret) 528 if (ret)
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 04b09564bfa9..8126824daccb 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -43,7 +43,6 @@
43/* For SCSI -> ATAPI command conversion */ 43/* For SCSI -> ATAPI command conversion */
44#include <scsi/scsi.h> 44#include <scsi/scsi.h>
45 45
46#include <linux/irq.h>
47#include <linux/io.h> 46#include <linux/io.h>
48#include <asm/byteorder.h> 47#include <asm/byteorder.h>
49#include <linux/uaccess.h> 48#include <linux/uaccess.h>
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 61fdf544fbd6..3d42043fec51 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -35,7 +35,6 @@
35#include <scsi/scsi_ioctl.h> 35#include <scsi/scsi_ioctl.h>
36 36
37#include <asm/byteorder.h> 37#include <asm/byteorder.h>
38#include <linux/irq.h>
39#include <linux/uaccess.h> 38#include <linux/uaccess.h>
40#include <linux/io.h> 39#include <linux/io.h>
41#include <asm/unaligned.h> 40#include <asm/unaligned.h>
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 7ecb1ade8874..ce8237d36159 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -41,7 +41,6 @@
41#include <scsi/scsi.h> 41#include <scsi/scsi.h>
42 42
43#include <asm/byteorder.h> 43#include <asm/byteorder.h>
44#include <linux/irq.h>
45#include <linux/uaccess.h> 44#include <linux/uaccess.h>
46#include <linux/io.h> 45#include <linux/io.h>
47#include <asm/unaligned.h> 46#include <asm/unaligned.h>
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
index b59d04c72051..1892e81fb00f 100644
--- a/drivers/ide/piix.c
+++ b/drivers/ide/piix.c
@@ -331,7 +331,7 @@ static const struct ide_port_ops ich_port_ops = {
331 .udma_mask = udma, \ 331 .udma_mask = udma, \
332 } 332 }
333 333
334#define DECLARE_ICH_DEV(udma) \ 334#define DECLARE_ICH_DEV(mwdma, udma) \
335 { \ 335 { \
336 .name = DRV_NAME, \ 336 .name = DRV_NAME, \
337 .init_chipset = init_chipset_ich, \ 337 .init_chipset = init_chipset_ich, \
@@ -340,7 +340,7 @@ static const struct ide_port_ops ich_port_ops = {
340 .port_ops = &ich_port_ops, \ 340 .port_ops = &ich_port_ops, \
341 .pio_mask = ATA_PIO4, \ 341 .pio_mask = ATA_PIO4, \
342 .swdma_mask = ATA_SWDMA2_ONLY, \ 342 .swdma_mask = ATA_SWDMA2_ONLY, \
343 .mwdma_mask = ATA_MWDMA12_ONLY, \ 343 .mwdma_mask = mwdma, \
344 .udma_mask = udma, \ 344 .udma_mask = udma, \
345 } 345 }
346 346
@@ -362,13 +362,15 @@ static const struct ide_port_info piix_pci_info[] __devinitdata = {
362 /* 2: PIIX4 */ 362 /* 2: PIIX4 */
363 DECLARE_PIIX_DEV(ATA_UDMA2), 363 DECLARE_PIIX_DEV(ATA_UDMA2),
364 /* 3: ICH0 */ 364 /* 3: ICH0 */
365 DECLARE_ICH_DEV(ATA_UDMA2), 365 DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA2),
366 /* 4: ICH */ 366 /* 4: ICH */
367 DECLARE_ICH_DEV(ATA_UDMA4), 367 DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA4),
368 /* 5: PIIX4 */ 368 /* 5: PIIX4 */
369 DECLARE_PIIX_DEV(ATA_UDMA4), 369 DECLARE_PIIX_DEV(ATA_UDMA4),
370 /* 6: ICH[2-7]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */ 370 /* 6: ICH[2-6]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
371 DECLARE_ICH_DEV(ATA_UDMA5), 371 DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA5),
372 /* 7: ICH7/7-R, no MWDMA1 */
373 DECLARE_ICH_DEV(ATA_MWDMA2_ONLY, ATA_UDMA5),
372}; 374};
373 375
374/** 376/**
@@ -438,9 +440,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
438#endif 440#endif
439 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 }, 441 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 },
440 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 }, 442 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 },
441 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 6 }, 443 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 7 },
442 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 }, 444 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 },
443 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 6 }, 445 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 7 },
444 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 }, 446 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 },
445 { 0, }, 447 { 0, },
446}; 448};
diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
index e53a1b78378b..281c91426345 100644
--- a/drivers/ide/triflex.c
+++ b/drivers/ide/triflex.c
@@ -113,12 +113,26 @@ static const struct pci_device_id triflex_pci_tbl[] = {
113}; 113};
114MODULE_DEVICE_TABLE(pci, triflex_pci_tbl); 114MODULE_DEVICE_TABLE(pci, triflex_pci_tbl);
115 115
116#ifdef CONFIG_PM
117static int triflex_ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
118{
119 /*
120 * We must not disable or powerdown the device.
121 * APM bios refuses to suspend if IDE is not accessible.
122 */
123 pci_save_state(dev);
124 return 0;
125}
126#else
127#define triflex_ide_pci_suspend NULL
128#endif
129
116static struct pci_driver triflex_pci_driver = { 130static struct pci_driver triflex_pci_driver = {
117 .name = "TRIFLEX_IDE", 131 .name = "TRIFLEX_IDE",
118 .id_table = triflex_pci_tbl, 132 .id_table = triflex_pci_tbl,
119 .probe = triflex_init_one, 133 .probe = triflex_init_one,
120 .remove = ide_pci_remove, 134 .remove = ide_pci_remove,
121 .suspend = ide_pci_suspend, 135 .suspend = triflex_ide_pci_suspend,
122 .resume = ide_pci_resume, 136 .resume = ide_pci_resume,
123}; 137};
124 138
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 691276bafd78..e9cf51b1343b 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -216,7 +216,9 @@ static int addr4_resolve(struct sockaddr_in *src_in,
216 216
217 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev); 217 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
218 if (!neigh || !(neigh->nud_state & NUD_VALID)) { 218 if (!neigh || !(neigh->nud_state & NUD_VALID)) {
219 rcu_read_lock();
219 neigh_event_send(dst_get_neighbour(&rt->dst), NULL); 220 neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
221 rcu_read_unlock();
220 ret = -ENODATA; 222 ret = -ENODATA;
221 if (neigh) 223 if (neigh)
222 goto release; 224 goto release;
@@ -274,15 +276,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
274 goto put; 276 goto put;
275 } 277 }
276 278
279 rcu_read_lock();
277 neigh = dst_get_neighbour(dst); 280 neigh = dst_get_neighbour(dst);
278 if (!neigh || !(neigh->nud_state & NUD_VALID)) { 281 if (!neigh || !(neigh->nud_state & NUD_VALID)) {
279 if (neigh) 282 if (neigh)
280 neigh_event_send(neigh, NULL); 283 neigh_event_send(neigh, NULL);
281 ret = -ENODATA; 284 ret = -ENODATA;
282 goto put; 285 } else {
286 ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
283 } 287 }
284 288 rcu_read_unlock();
285 ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
286put: 289put:
287 dst_release(dst); 290 dst_release(dst);
288 return ret; 291 return ret;
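The addr.c hunks (and the cxgb3/cxgb4/nes/ipoib ones below) wrap users of dst_get_neighbour() in rcu_read_lock()/rcu_read_unlock(): the neighbour pointer is RCU-protected, so it may only be dereferenced inside a read-side critical section. Pattern sketch, with dst and ha as placeholders from the surrounding code:

	rcu_read_lock();
	neigh = dst_get_neighbour(dst);
	if (neigh && (neigh->nud_state & NUD_VALID))
		memcpy(ha, neigh->ha, dst->dev->addr_len);	/* only valid under the read lock */
	rcu_read_unlock();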
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index de6d0774e609..c88b12beef25 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1375,8 +1375,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1375 goto reject; 1375 goto reject;
1376 } 1376 }
1377 dst = &rt->dst; 1377 dst = &rt->dst;
1378 rcu_read_lock();
1378 neigh = dst_get_neighbour(dst); 1379 neigh = dst_get_neighbour(dst);
1379 l2t = t3_l2t_get(tdev, neigh, neigh->dev); 1380 l2t = t3_l2t_get(tdev, neigh, neigh->dev);
1381 rcu_read_unlock();
1380 if (!l2t) { 1382 if (!l2t) {
1381 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 1383 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1382 __func__); 1384 __func__);
@@ -1946,10 +1948,12 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1946 } 1948 }
1947 ep->dst = &rt->dst; 1949 ep->dst = &rt->dst;
1948 1950
1951 rcu_read_lock();
1949 neigh = dst_get_neighbour(ep->dst); 1952 neigh = dst_get_neighbour(ep->dst);
1950 1953
1951 /* get a l2t entry */ 1954 /* get a l2t entry */
1952 ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev); 1955 ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
1956 rcu_read_unlock();
1953 if (!ep->l2t) { 1957 if (!ep->l2t) {
1954 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 1958 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1955 err = -ENOMEM; 1959 err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b36cdac9c558..0747004313ad 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -542,8 +542,10 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
542 (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); 542 (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
543 mpa->private_data_size = htons(ep->plen); 543 mpa->private_data_size = htons(ep->plen);
544 mpa->revision = mpa_rev_to_use; 544 mpa->revision = mpa_rev_to_use;
545 if (mpa_rev_to_use == 1) 545 if (mpa_rev_to_use == 1) {
546 ep->tried_with_mpa_v1 = 1; 546 ep->tried_with_mpa_v1 = 1;
547 ep->retry_with_mpa_v1 = 0;
548 }
547 549
548 if (mpa_rev_to_use == 2) { 550 if (mpa_rev_to_use == 2) {
549 mpa->private_data_size += 551 mpa->private_data_size +=
@@ -1594,6 +1596,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1594 goto reject; 1596 goto reject;
1595 } 1597 }
1596 dst = &rt->dst; 1598 dst = &rt->dst;
1599 rcu_read_lock();
1597 neigh = dst_get_neighbour(dst); 1600 neigh = dst_get_neighbour(dst);
1598 if (neigh->dev->flags & IFF_LOOPBACK) { 1601 if (neigh->dev->flags & IFF_LOOPBACK) {
1599 pdev = ip_dev_find(&init_net, peer_ip); 1602 pdev = ip_dev_find(&init_net, peer_ip);
@@ -1620,6 +1623,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1620 rss_qid = dev->rdev.lldi.rxq_ids[ 1623 rss_qid = dev->rdev.lldi.rxq_ids[
1621 cxgb4_port_idx(neigh->dev) * step]; 1624 cxgb4_port_idx(neigh->dev) * step];
1622 } 1625 }
1626 rcu_read_unlock();
1623 if (!l2t) { 1627 if (!l2t) {
1624 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 1628 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1625 __func__); 1629 __func__);
@@ -1820,6 +1824,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
1820 } 1824 }
1821 ep->dst = &rt->dst; 1825 ep->dst = &rt->dst;
1822 1826
1827 rcu_read_lock();
1823 neigh = dst_get_neighbour(ep->dst); 1828 neigh = dst_get_neighbour(ep->dst);
1824 1829
1825 /* get a l2t entry */ 1830 /* get a l2t entry */
@@ -1856,6 +1861,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
1856 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ 1861 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
1857 cxgb4_port_idx(neigh->dev) * step]; 1862 cxgb4_port_idx(neigh->dev) * step];
1858 } 1863 }
1864 rcu_read_unlock();
1859 if (!ep->l2t) { 1865 if (!ep->l2t) {
1860 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 1866 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1861 err = -ENOMEM; 1867 err = -ENOMEM;
@@ -2301,6 +2307,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2301 } 2307 }
2302 ep->dst = &rt->dst; 2308 ep->dst = &rt->dst;
2303 2309
2310 rcu_read_lock();
2304 neigh = dst_get_neighbour(ep->dst); 2311 neigh = dst_get_neighbour(ep->dst);
2305 2312
2306 /* get a l2t entry */ 2313 /* get a l2t entry */
@@ -2339,6 +2346,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2339 ep->retry_with_mpa_v1 = 0; 2346 ep->retry_with_mpa_v1 = 0;
2340 ep->tried_with_mpa_v1 = 0; 2347 ep->tried_with_mpa_v1 = 0;
2341 } 2348 }
2349 rcu_read_unlock();
2342 if (!ep->l2t) { 2350 if (!ep->l2t) {
2343 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 2351 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
2344 err = -ENOMEM; 2352 err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index f35a935267e7..0f1607c8325a 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -311,7 +311,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
311 while (ptr != cq->sw_pidx) { 311 while (ptr != cq->sw_pidx) {
312 cqe = &cq->sw_queue[ptr]; 312 cqe = &cq->sw_queue[ptr];
313 if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) && 313 if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
314 (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq)) 314 (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
315 (*count)++; 315 (*count)++;
316 if (++ptr == cq->size) 316 if (++ptr == cq->size)
317 ptr = 0; 317 ptr = 0;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index dfce9ea98a39..0a52d72371ee 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1377,9 +1377,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1377 neigh_release(neigh); 1377 neigh_release(neigh);
1378 } 1378 }
1379 1379
1380 if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) 1380 if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) {
1381 rcu_read_lock();
1381 neigh_event_send(dst_get_neighbour(&rt->dst), NULL); 1382 neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
1382 1383 rcu_read_unlock();
1384 }
1383 ip_rt_put(rt); 1385 ip_rt_put(rt);
1384 return rc; 1386 return rc;
1385} 1387}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bd2162b95dc..1d5895941e19 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2307,19 +2307,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2307 SYM_LSB(IBCCtrlA_0, MaxPktLen); 2307 SYM_LSB(IBCCtrlA_0, MaxPktLen);
2308 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */ 2308 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2309 2309
2310 /* initially come up waiting for TS1, without sending anything. */
2311 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2312 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2313
2314 ppd->cpspec->ibcctrl_a = val;
2315 /* 2310 /*
2316 * Reset the PCS interface to the serdes (and also ibc, which is still 2311 * Reset the PCS interface to the serdes (and also ibc, which is still
2317 * in reset from above). Writes new value of ibcctrl_a as last step. 2312 * in reset from above). Writes new value of ibcctrl_a as last step.
2318 */ 2313 */
2319 qib_7322_mini_pcs_reset(ppd); 2314 qib_7322_mini_pcs_reset(ppd);
2320 qib_write_kreg(dd, kr_scratch, 0ULL);
2321 /* clear the linkinit cmds */
2322 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2323 2315
2324 if (!ppd->cpspec->ibcctrl_b) { 2316 if (!ppd->cpspec->ibcctrl_b) {
2325 unsigned lse = ppd->link_speed_enabled; 2317 unsigned lse = ppd->link_speed_enabled;
@@ -2385,6 +2377,14 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2385 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn); 2377 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2386 set_vls(ppd); 2378 set_vls(ppd);
2387 2379
2380 /* initially come up DISABLED, without sending anything. */
2381 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2382 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2383 qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2384 qib_write_kreg(dd, kr_scratch, 0ULL);
2385 /* clear the linkinit cmds */
2386 ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2387
2388 /* be paranoid against later code motion, etc. */ 2388 /* be paranoid against later code motion, etc. */
2389 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); 2389 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2390 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable); 2390 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
@@ -5241,7 +5241,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5241 off */ 5241 off */
5242 if (ppd->dd->flags & QIB_HAS_QSFP) { 5242 if (ppd->dd->flags & QIB_HAS_QSFP) {
5243 qd->t_insert = get_jiffies_64(); 5243 qd->t_insert = get_jiffies_64();
5244 schedule_work(&qd->work); 5244 queue_work(ib_wq, &qd->work);
5245 } 5245 }
5246 spin_lock_irqsave(&ppd->sdma_lock, flags); 5246 spin_lock_irqsave(&ppd->sdma_lock, flags);
5247 if (__qib_sdma_running(ppd)) 5247 if (__qib_sdma_running(ppd))
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index e06c4ed383f1..fa71b1e666c5 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -480,18 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
480 udelay(20); /* Generous RST dwell */ 480 udelay(20); /* Generous RST dwell */
481 481
482 dd->f_gpio_mod(dd, mask, mask, mask); 482 dd->f_gpio_mod(dd, mask, mask, mask);
483 /* Spec says module can take up to two seconds! */
484 mask = QSFP_GPIO_MOD_PRS_N;
485 if (qd->ppd->hw_pidx)
486 mask <<= QSFP_GPIO_PORT2_SHIFT;
487
488 /* Do not try to wait here. Better to let event handle it */
489 if (!qib_qsfp_mod_present(qd->ppd))
490 goto bail;
491 /* We see a module, but it may be unwise to look yet. Just schedule */
492 qd->t_insert = get_jiffies_64();
493 queue_work(ib_wq, &qd->work);
494bail:
495 return; 483 return;
496} 484}
497 485
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0ef9af94997d..4115be54ba3b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -57,21 +57,24 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
57 struct ib_pd *pd, struct ib_ah_attr *attr) 57 struct ib_pd *pd, struct ib_ah_attr *attr)
58{ 58{
59 struct ipoib_ah *ah; 59 struct ipoib_ah *ah;
60 struct ib_ah *vah;
60 61
61 ah = kmalloc(sizeof *ah, GFP_KERNEL); 62 ah = kmalloc(sizeof *ah, GFP_KERNEL);
62 if (!ah) 63 if (!ah)
63 return NULL; 64 return ERR_PTR(-ENOMEM);
64 65
65 ah->dev = dev; 66 ah->dev = dev;
66 ah->last_send = 0; 67 ah->last_send = 0;
67 kref_init(&ah->ref); 68 kref_init(&ah->ref);
68 69
69 ah->ah = ib_create_ah(pd, attr); 70 vah = ib_create_ah(pd, attr);
70 if (IS_ERR(ah->ah)) { 71 if (IS_ERR(vah)) {
71 kfree(ah); 72 kfree(ah);
72 ah = NULL; 73 ah = (struct ipoib_ah *)vah;
73 } else 74 } else {
75 ah->ah = vah;
74 ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah); 76 ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
77 }
75 78
76 return ah; 79 return ah;
77} 80}
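ipoib_create_ah() now reports failures through the ERR_PTR convention instead of returning NULL, which is why the callers below switch to IS_ERR()/IS_ERR_OR_NULL(). Sketch of a caller under that convention (surrounding names assumed from context):

	struct ipoib_ah *ah = ipoib_create_ah(dev, priv->pd, &attr);

	if (IS_ERR(ah))
		return PTR_ERR(ah);	/* recover the encoded errno, e.g. -ENOMEM */
	/* ... use ah->ah ... */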
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7567b6000230..83695b48b010 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -432,7 +432,7 @@ static void path_rec_completion(int status,
432 432
433 spin_lock_irqsave(&priv->lock, flags); 433 spin_lock_irqsave(&priv->lock, flags);
434 434
435 if (ah) { 435 if (!IS_ERR_OR_NULL(ah)) {
436 path->pathrec = *pathrec; 436 path->pathrec = *pathrec;
437 437
438 old_ah = path->ah; 438 old_ah = path->ah;
@@ -555,6 +555,7 @@ static int path_rec_start(struct net_device *dev,
555 return 0; 555 return 0;
556} 556}
557 557
558/* called with rcu_read_lock */
558static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) 559static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
559{ 560{
560 struct ipoib_dev_priv *priv = netdev_priv(dev); 561 struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -636,6 +637,7 @@ err_drop:
636 spin_unlock_irqrestore(&priv->lock, flags); 637 spin_unlock_irqrestore(&priv->lock, flags);
637} 638}
638 639
640/* called with rcu_read_lock */
639static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) 641static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
640{ 642{
641 struct ipoib_dev_priv *priv = netdev_priv(skb->dev); 643 struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
@@ -720,13 +722,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
720 struct neighbour *n = NULL; 722 struct neighbour *n = NULL;
721 unsigned long flags; 723 unsigned long flags;
722 724
725 rcu_read_lock();
723 if (likely(skb_dst(skb))) 726 if (likely(skb_dst(skb)))
724 n = dst_get_neighbour(skb_dst(skb)); 727 n = dst_get_neighbour(skb_dst(skb));
725 728
726 if (likely(n)) { 729 if (likely(n)) {
727 if (unlikely(!*to_ipoib_neigh(n))) { 730 if (unlikely(!*to_ipoib_neigh(n))) {
728 ipoib_path_lookup(skb, dev); 731 ipoib_path_lookup(skb, dev);
729 return NETDEV_TX_OK; 732 goto unlock;
730 } 733 }
731 734
732 neigh = *to_ipoib_neigh(n); 735 neigh = *to_ipoib_neigh(n);
@@ -749,17 +752,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
749 ipoib_neigh_free(dev, neigh); 752 ipoib_neigh_free(dev, neigh);
750 spin_unlock_irqrestore(&priv->lock, flags); 753 spin_unlock_irqrestore(&priv->lock, flags);
751 ipoib_path_lookup(skb, dev); 754 ipoib_path_lookup(skb, dev);
752 return NETDEV_TX_OK; 755 goto unlock;
753 } 756 }
754 757
755 if (ipoib_cm_get(neigh)) { 758 if (ipoib_cm_get(neigh)) {
756 if (ipoib_cm_up(neigh)) { 759 if (ipoib_cm_up(neigh)) {
757 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); 760 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
758 return NETDEV_TX_OK; 761 goto unlock;
759 } 762 }
760 } else if (neigh->ah) { 763 } else if (neigh->ah) {
761 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha)); 764 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
762 return NETDEV_TX_OK; 765 goto unlock;
763 } 766 }
764 767
765 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 768 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
@@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
793 phdr->hwaddr + 4); 796 phdr->hwaddr + 4);
794 dev_kfree_skb_any(skb); 797 dev_kfree_skb_any(skb);
795 ++dev->stats.tx_dropped; 798 ++dev->stats.tx_dropped;
796 return NETDEV_TX_OK; 799 goto unlock;
797 } 800 }
798 801
799 unicast_arp_send(skb, dev, phdr); 802 unicast_arp_send(skb, dev, phdr);
800 } 803 }
801 } 804 }
802 805unlock:
806 rcu_read_unlock();
803 return NETDEV_TX_OK; 807 return NETDEV_TX_OK;
804} 808}
805 809
@@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
837 dst = skb_dst(skb); 841 dst = skb_dst(skb);
838 n = NULL; 842 n = NULL;
839 if (dst) 843 if (dst)
840 n = dst_get_neighbour(dst); 844 n = dst_get_neighbour_raw(dst);
841 if ((!dst || !n) && daddr) { 845 if ((!dst || !n) && daddr) {
842 struct ipoib_pseudoheader *phdr = 846 struct ipoib_pseudoheader *phdr =
843 (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); 847 (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1b7a97686356..873bff97e69e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -240,8 +240,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
240 av.grh.dgid = mcast->mcmember.mgid; 240 av.grh.dgid = mcast->mcmember.mgid;
241 241
242 ah = ipoib_create_ah(dev, priv->pd, &av); 242 ah = ipoib_create_ah(dev, priv->pd, &av);
243 if (!ah) { 243 if (IS_ERR(ah)) {
244 ipoib_warn(priv, "ib_address_create failed\n"); 244 ipoib_warn(priv, "ib_address_create failed %ld\n",
245 -PTR_ERR(ah));
246 /* use original error */
247 return PTR_ERR(ah);
245 } else { 248 } else {
246 spin_lock_irq(&priv->lock); 249 spin_lock_irq(&priv->lock);
247 mcast->ah = ah; 250 mcast->ah = ah;
@@ -266,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
266 269
267 skb->dev = dev; 270 skb->dev = dev;
268 if (dst) 271 if (dst)
269 n = dst_get_neighbour(dst); 272 n = dst_get_neighbour_raw(dst);
270 if (!dst || !n) { 273 if (!dst || !n) {
271 /* put pseudoheader back on for next time */ 274 /* put pseudoheader back on for next time */
272 skb_push(skb, sizeof (struct ipoib_pseudoheader)); 275 skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -722,6 +725,8 @@ out:
722 if (mcast && mcast->ah) { 725 if (mcast && mcast->ah) {
723 struct dst_entry *dst = skb_dst(skb); 726 struct dst_entry *dst = skb_dst(skb);
724 struct neighbour *n = NULL; 727 struct neighbour *n = NULL;
728
729 rcu_read_lock();
725 if (dst) 730 if (dst)
726 n = dst_get_neighbour(dst); 731 n = dst_get_neighbour(dst);
727 if (n && !*to_ipoib_neigh(n)) { 732 if (n && !*to_ipoib_neigh(n)) {
@@ -734,7 +739,7 @@ out:
734 list_add_tail(&neigh->list, &mcast->neigh_list); 739 list_add_tail(&neigh->list, &mcast->neigh_list);
735 } 740 }
736 } 741 }
737 742 rcu_read_unlock();
738 spin_unlock_irqrestore(&priv->lock, flags); 743 spin_unlock_irqrestore(&priv->lock, flags);
739 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); 744 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
740 return; 745 return;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 09b93b11a274..e2a9867c19d5 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1210,18 +1210,28 @@ static int elantech_reconnect(struct psmouse *psmouse)
1210 */ 1210 */
1211static int elantech_set_properties(struct elantech_data *etd) 1211static int elantech_set_properties(struct elantech_data *etd)
1212{ 1212{
1213 /* This represents the version of IC body. */
1213 int ver = (etd->fw_version & 0x0f0000) >> 16; 1214 int ver = (etd->fw_version & 0x0f0000) >> 16;
1214 1215
1216 /* Early version of Elan touchpads doesn't obey the rule. */
1215 if (etd->fw_version < 0x020030 || etd->fw_version == 0x020600) 1217 if (etd->fw_version < 0x020030 || etd->fw_version == 0x020600)
1216 etd->hw_version = 1; 1218 etd->hw_version = 1;
1217 else if (etd->fw_version < 0x150600) 1219 else {
1218 etd->hw_version = 2; 1220 switch (ver) {
1219 else if (ver == 5) 1221 case 2:
1220 etd->hw_version = 3; 1222 case 4:
1221 else if (ver == 6) 1223 etd->hw_version = 2;
1222 etd->hw_version = 4; 1224 break;
1223 else 1225 case 5:
1224 return -1; 1226 etd->hw_version = 3;
1227 break;
1228 case 6:
1229 etd->hw_version = 4;
1230 break;
1231 default:
1232 return -1;
1233 }
1234 }
1225 1235
1226 /* 1236 /*
1227 * Turn on packet checking by default. 1237 * Turn on packet checking by default.
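
The elantech_set_properties() rewrite above keeps the same firmware-version to
hardware-version mapping but expresses it as a switch on the IC-body nibble. The same
logic as a small standalone function, for reference (map_hw_version() is not a name
from the driver):

    #include <stdio.h>

    static int map_hw_version(unsigned int fw_version)
    {
        int ver = (fw_version & 0x0f0000) >> 16;    /* IC body version nibble */

        /* early firmware that predates the versioning rule */
        if (fw_version < 0x020030 || fw_version == 0x020600)
            return 1;

        switch (ver) {
        case 2:
        case 4:
            return 2;
        case 5:
            return 3;
        case 6:
            return 4;
        default:
            return -1;                              /* unknown IC: reject */
        }
    }

    int main(void)
    {
        printf("fw 0x550f00 -> hw_version %d\n", map_hw_version(0x550f00));
        return 0;
    }
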
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index 4b2a42f9f0bb..d4d08bd9205b 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -24,6 +24,7 @@
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/serio.h> 25#include <linux/serio.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/module.h>
27 28
28#include <asm/mach-types.h> 29#include <asm/mach-types.h>
29#include <plat/board-ams-delta.h> 30#include <plat/board-ams-delta.h>
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index bb9f5d31f0d0..b4cfc6c8be89 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -431,6 +431,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
431 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), 431 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
432 }, 432 },
433 }, 433 },
434 {
435 /* Newer HP Pavilion dv4 models */
436 .matches = {
437 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
438 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
439 },
440 },
434 { } 441 { }
435}; 442};
436 443
@@ -560,6 +567,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
560 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), 567 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
561 }, 568 },
562 }, 569 },
570 {
571 /* Newer HP Pavilion dv4 models */
572 .matches = {
573 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
574 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
575 },
576 },
563 { } 577 { }
564}; 578};
565 579
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c0c7820d4c46..bdc447fd4766 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -405,6 +405,9 @@ int dmar_disabled = 0;
405int dmar_disabled = 1; 405int dmar_disabled = 1;
406#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/ 406#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
407 407
408int intel_iommu_enabled = 0;
409EXPORT_SYMBOL_GPL(intel_iommu_enabled);
410
408static int dmar_map_gfx = 1; 411static int dmar_map_gfx = 1;
409static int dmar_forcedac; 412static int dmar_forcedac;
410static int intel_iommu_strict; 413static int intel_iommu_strict;
@@ -3524,7 +3527,7 @@ found:
3524 return 0; 3527 return 0;
3525} 3528}
3526 3529
3527int dmar_parse_rmrr_atsr_dev(void) 3530int __init dmar_parse_rmrr_atsr_dev(void)
3528{ 3531{
3529 struct dmar_rmrr_unit *rmrr, *rmrr_n; 3532 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3530 struct dmar_atsr_unit *atsr, *atsr_n; 3533 struct dmar_atsr_unit *atsr, *atsr_n;
@@ -3647,6 +3650,8 @@ int __init intel_iommu_init(void)
3647 3650
3648 bus_register_notifier(&pci_bus_type, &device_nb); 3651 bus_register_notifier(&pci_bus_type, &device_nb);
3649 3652
3653 intel_iommu_enabled = 1;
3654
3650 return 0; 3655 return 0;
3651} 3656}
3652 3657
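
Besides the __init annotations, the intel-iommu.c hunks export a simple status flag,
intel_iommu_enabled, that is set only once initialisation has completed. A hedged
sketch of that publish-last pattern with made-up names:

    #include <linux/init.h>
    #include <linux/export.h>

    int demo_feature_enabled;               /* stays 0 unless init succeeds */
    EXPORT_SYMBOL_GPL(demo_feature_enabled);

    static int __init demo_init(void)
    {
        /* ... setup steps that may fail and return early ... */

        demo_feature_enabled = 1;           /* publish success as the last step */
        return 0;
    }
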
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 07c9f189f314..6777ca049471 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void)
773 return ir_supported; 773 return ir_supported;
774} 774}
775 775
776int ir_dev_scope_init(void) 776int __init ir_dev_scope_init(void)
777{ 777{
778 if (!intr_remapping_enabled) 778 if (!intr_remapping_enabled)
779 return 0; 779 return 0;
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 9c192e79f806..288da5c1499d 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/err.h> 14#include <linux/err.h>
14#include <linux/clk.h> 15#include <linux/clk.h>
15#include <linux/io.h> 16#include <linux/io.h>
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index e8fdb8830f69..46be456fcc00 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/err.h> 14#include <linux/err.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 33ec9e467772..9021182c4b76 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
242 case IIOCDOCFINT: 242 case IIOCDOCFINT:
243 if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid)) 243 if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
244 return (-EINVAL); /* invalid driver */ 244 return (-EINVAL); /* invalid driver */
245 if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
246 sizeof(dioctl.cf_ctrl.msn))
247 return -EINVAL;
248 if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
249 sizeof(dioctl.cf_ctrl.fwd_nr))
250 return -EINVAL;
245 if ((i = cf_command(dioctl.cf_ctrl.drvid, 251 if ((i = cf_command(dioctl.cf_ctrl.drvid,
246 (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2, 252 (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
247 dioctl.cf_ctrl.cfproc, 253 dioctl.cf_ctrl.cfproc,
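
The check added above rejects ioctl string fields that arrive without a terminating NUL
before they are passed on to cf_command(); the isdn_net.c hunk below applies the same
guard to cfg->drvid ahead of the strcpy(). The idea in plain userspace C
(field_is_terminated() is only for illustration):

    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <string.h>

    /* A fixed-size field copied in from userspace is only a valid C string if
     * strnlen() finds a NUL inside the buffer; hitting the full size means the
     * field is unterminated and must be rejected (-EINVAL in the driver). */
    static int field_is_terminated(const char *field, size_t size)
    {
        return strnlen(field, size) < size;
    }

    int main(void)
    {
        char ok[8] = "eicon";                               /* NUL inside the buffer */
        char bad[8] = { 'x','x','x','x','x','x','x','x' };  /* no NUL at all */

        printf("ok:  %d\n", field_is_terminated(ok, sizeof(ok)));
        printf("bad: %d\n", field_is_terminated(bad, sizeof(bad)));
        return 0;
    }
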
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f73d7f7e024..2339d7396b9e 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
2756 char *c, 2756 char *c,
2757 *e; 2757 *e;
2758 2758
2759 if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
2760 sizeof(cfg->drvid))
2761 return -EINVAL;
2759 drvidx = -1; 2762 drvidx = -1;
2760 chidx = -1; 2763 chidx = -1;
2761 strcpy(drvid, cfg->drvid); 2764 strcpy(drvid, cfg->drvid);
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 661b692573e7..6d5628bb0601 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -270,11 +270,8 @@ void led_blink_set(struct led_classdev *led_cdev,
270 del_timer_sync(&led_cdev->blink_timer); 270 del_timer_sync(&led_cdev->blink_timer);
271 271
272 if (led_cdev->blink_set && 272 if (led_cdev->blink_set &&
273 !led_cdev->blink_set(led_cdev, delay_on, delay_off)) { 273 !led_cdev->blink_set(led_cdev, delay_on, delay_off))
274 led_cdev->blink_delay_on = *delay_on;
275 led_cdev->blink_delay_off = *delay_off;
276 return; 274 return;
277 }
278 275
279 /* blink with 1 Hz as default if nothing specified */ 276 /* blink with 1 Hz as default if nothing specified */
280 if (!*delay_on && !*delay_off) 277 if (!*delay_on && !*delay_off)
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index 817f37a875c9..c9570fcf1cce 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -159,7 +159,7 @@ int macii_init(void)
159 err = macii_init_via(); 159 err = macii_init_via();
160 if (err) goto out; 160 if (err) goto out;
161 161
162 err = request_irq(IRQ_MAC_ADB, macii_interrupt, IRQ_FLG_LOCK, "ADB", 162 err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
163 macii_interrupt); 163 macii_interrupt);
164 if (err) goto out; 164 if (err) goto out;
165 165
diff --git a/drivers/macintosh/via-maciisi.c b/drivers/macintosh/via-maciisi.c
index 9ab5b0c34f0d..34d02a91b29f 100644
--- a/drivers/macintosh/via-maciisi.c
+++ b/drivers/macintosh/via-maciisi.c
@@ -122,8 +122,8 @@ maciisi_init(void)
122 return err; 122 return err;
123 } 123 }
124 124
125 if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, IRQ_FLG_LOCK | IRQ_FLG_FAST, 125 if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, 0, "ADB",
126 "ADB", maciisi_interrupt)) { 126 maciisi_interrupt)) {
127 printk(KERN_ERR "maciisi_init: can't get irq %d\n", IRQ_MAC_ADB); 127 printk(KERN_ERR "maciisi_init: can't get irq %d\n", IRQ_MAC_ADB);
128 return -EAGAIN; 128 return -EAGAIN;
129 } 129 }
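
Both via-macii.c and via-maciisi.c above drop the legacy IRQ_FLG_* request flags and
pass 0 instead. The resulting request_irq() call shape, sketched with made-up names:

    #include <linux/interrupt.h>

    static irqreturn_t demo_handler(int irq, void *dev_id)
    {
        /* acknowledge/handle the device here */
        return IRQ_HANDLED;
    }

    static int demo_setup(unsigned int demo_irq, void *dev_id)
    {
        /* flags == 0: no sharing or trigger modifiers requested */
        return request_irq(demo_irq, demo_handler, 0, "demo", dev_id);
    }
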
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7878712721bf..b6907118283a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
1106 */ 1106 */
1107 int i; 1107 int i;
1108 1108
1109 spin_lock_irq(&bitmap->lock);
1109 for (i = 0; i < bitmap->file_pages; i++) 1110 for (i = 0; i < bitmap->file_pages; i++)
1110 set_page_attr(bitmap, bitmap->filemap[i], 1111 set_page_attr(bitmap, bitmap->filemap[i],
1111 BITMAP_PAGE_NEEDWRITE); 1112 BITMAP_PAGE_NEEDWRITE);
1112 bitmap->allclean = 0; 1113 bitmap->allclean = 0;
1114 spin_unlock_irq(&bitmap->lock);
1113} 1115}
1114 1116
1115static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc) 1117static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1605,7 +1607,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
1605 for (chunk = s; chunk <= e; chunk++) { 1607 for (chunk = s; chunk <= e; chunk++) {
1606 sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap); 1608 sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
1607 bitmap_set_memory_bits(bitmap, sec, 1); 1609 bitmap_set_memory_bits(bitmap, sec, 1);
1610 spin_lock_irq(&bitmap->lock);
1608 bitmap_file_set_bit(bitmap, sec); 1611 bitmap_file_set_bit(bitmap, sec);
1612 spin_unlock_irq(&bitmap->lock);
1609 if (sec < bitmap->mddev->recovery_cp) 1613 if (sec < bitmap->mddev->recovery_cp)
1610 /* We are asserting that the array is dirty, 1614 /* We are asserting that the array is dirty,
1611 * so move the recovery_cp address back so 1615 * so move the recovery_cp address back so
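
The bitmap.c hunks wrap the filemap walk in bitmap_write_all() and the
bitmap_file_set_bit() call in bitmap_dirty_bits() with bitmap->lock, using the
irq-disabling spin_lock_irq() variant. The general shape, with an illustrative struct:

    #include <linux/spinlock.h>

    struct demo {
        spinlock_t lock;        /* assumed spin_lock_init()ed at setup time */
        int nr_pages;
        unsigned long *attr;
    };

    static void demo_mark_all(struct demo *d, unsigned long flag)
    {
        int i;

        spin_lock_irq(&d->lock);            /* serialise with other lock holders */
        for (i = 0; i < d->nr_pages; i++)
            d->attr[i] |= flag;
        spin_unlock_irq(&d->lock);
    }
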
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 84acfe7d10e4..ee981737edfc 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev)
570 mddev->ctime == 0 && !mddev->hold_active) { 570 mddev->ctime == 0 && !mddev->hold_active) {
571 /* Array is not configured at all, and not held active, 571 /* Array is not configured at all, and not held active,
572 * so destroy it */ 572 * so destroy it */
573 list_del(&mddev->all_mddevs); 573 list_del_init(&mddev->all_mddevs);
574 bs = mddev->bio_set; 574 bs = mddev->bio_set;
575 mddev->bio_set = NULL; 575 mddev->bio_set = NULL;
576 if (mddev->gendisk) { 576 if (mddev->gendisk) {
@@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page)
2546 sep = ","; 2546 sep = ",";
2547 } 2547 }
2548 if (test_bit(Blocked, &rdev->flags) || 2548 if (test_bit(Blocked, &rdev->flags) ||
2549 rdev->badblocks.unacked_exist) { 2549 (rdev->badblocks.unacked_exist
2550 && !test_bit(Faulty, &rdev->flags))) {
2550 len += sprintf(page+len, "%sblocked", sep); 2551 len += sprintf(page+len, "%sblocked", sep);
2551 sep = ","; 2552 sep = ",";
2552 } 2553 }
@@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
3788 if (err) 3789 if (err)
3789 return err; 3790 return err;
3790 else { 3791 else {
3792 if (mddev->hold_active == UNTIL_IOCTL)
3793 mddev->hold_active = 0;
3791 sysfs_notify_dirent_safe(mddev->sysfs_state); 3794 sysfs_notify_dirent_safe(mddev->sysfs_state);
3792 return len; 3795 return len;
3793 } 3796 }
@@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4487 4490
4488 if (!entry->show) 4491 if (!entry->show)
4489 return -EIO; 4492 return -EIO;
4493 spin_lock(&all_mddevs_lock);
4494 if (list_empty(&mddev->all_mddevs)) {
4495 spin_unlock(&all_mddevs_lock);
4496 return -EBUSY;
4497 }
4498 mddev_get(mddev);
4499 spin_unlock(&all_mddevs_lock);
4500
4490 rv = mddev_lock(mddev); 4501 rv = mddev_lock(mddev);
4491 if (!rv) { 4502 if (!rv) {
4492 rv = entry->show(mddev, page); 4503 rv = entry->show(mddev, page);
4493 mddev_unlock(mddev); 4504 mddev_unlock(mddev);
4494 } 4505 }
4506 mddev_put(mddev);
4495 return rv; 4507 return rv;
4496} 4508}
4497 4509
@@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
4507 return -EIO; 4519 return -EIO;
4508 if (!capable(CAP_SYS_ADMIN)) 4520 if (!capable(CAP_SYS_ADMIN))
4509 return -EACCES; 4521 return -EACCES;
4522 spin_lock(&all_mddevs_lock);
4523 if (list_empty(&mddev->all_mddevs)) {
4524 spin_unlock(&all_mddevs_lock);
4525 return -EBUSY;
4526 }
4527 mddev_get(mddev);
4528 spin_unlock(&all_mddevs_lock);
4510 rv = mddev_lock(mddev); 4529 rv = mddev_lock(mddev);
4511 if (mddev->hold_active == UNTIL_IOCTL)
4512 mddev->hold_active = 0;
4513 if (!rv) { 4530 if (!rv) {
4514 rv = entry->store(mddev, page, length); 4531 rv = entry->store(mddev, page, length);
4515 mddev_unlock(mddev); 4532 mddev_unlock(mddev);
4516 } 4533 }
4534 mddev_put(mddev);
4517 return rv; 4535 return rv;
4518} 4536}
4519 4537
@@ -7840,6 +7858,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
7840 s + rdev->data_offset, sectors, acknowledged); 7858 s + rdev->data_offset, sectors, acknowledged);
7841 if (rv) { 7859 if (rv) {
7842 /* Make sure they get written out promptly */ 7860 /* Make sure they get written out promptly */
7861 sysfs_notify_dirent_safe(rdev->sysfs_state);
7843 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags); 7862 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
7844 md_wakeup_thread(rdev->mddev->thread); 7863 md_wakeup_thread(rdev->mddev->thread);
7845 } 7864 }
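
The md_attr_show()/md_attr_store() hunks above add a lookup-and-pin step: under
all_mddevs_lock they check list_empty(&mddev->all_mddevs) (which, together with the
switch to list_del_init() in mddev_put(), marks an array being torn down), take a
reference, drop the lock, do the sysfs work, and only then put the reference. A hedged
sketch of that pattern with generic names, not md's:

    #include <linux/list.h>
    #include <linux/kref.h>
    #include <linux/spinlock.h>
    #include <linux/errno.h>

    struct obj {
        struct list_head node;      /* on the global list while alive */
        struct kref ref;            /* assumed kref_init()ed at creation */
    };

    static DEFINE_SPINLOCK(all_objs_lock);

    static void obj_release(struct kref *ref)
    {
        /* final teardown/free of the containing object goes here */
    }

    static int pinned_show(struct obj *o, char *page,
                           int (*show)(struct obj *, char *))
    {
        int rv;

        spin_lock(&all_objs_lock);
        if (list_empty(&o->node)) {         /* already unlinked: refuse */
            spin_unlock(&all_objs_lock);
            return -EBUSY;
        }
        kref_get(&o->ref);                  /* pin while running unlocked */
        spin_unlock(&all_objs_lock);

        rv = show(o, page);                 /* may sleep; global lock not held */

        kref_put(&o->ref, obj_release);     /* may free o on the last put */
        return rv;
    }
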
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 472aedfb07cf..31670f8d6b65 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3036 if (dev->written) 3036 if (dev->written)
3037 s->written++; 3037 s->written++;
3038 rdev = rcu_dereference(conf->disks[i].rdev); 3038 rdev = rcu_dereference(conf->disks[i].rdev);
3039 if (rdev && test_bit(Faulty, &rdev->flags))
3040 rdev = NULL;
3039 if (rdev) { 3041 if (rdev) {
3040 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 3042 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
3041 &first_bad, &bad_sectors); 3043 &first_bad, &bad_sectors);
@@ -3063,12 +3065,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3063 } 3065 }
3064 } else if (test_bit(In_sync, &rdev->flags)) 3066 } else if (test_bit(In_sync, &rdev->flags))
3065 set_bit(R5_Insync, &dev->flags); 3067 set_bit(R5_Insync, &dev->flags);
3066 else if (!test_bit(Faulty, &rdev->flags)) { 3068 else {
3067 /* in sync if before recovery_offset */ 3069 /* in sync if before recovery_offset */
3068 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 3070 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
3069 set_bit(R5_Insync, &dev->flags); 3071 set_bit(R5_Insync, &dev->flags);
3070 } 3072 }
3071 if (test_bit(R5_WriteError, &dev->flags)) { 3073 if (rdev && test_bit(R5_WriteError, &dev->flags)) {
3072 clear_bit(R5_Insync, &dev->flags); 3074 clear_bit(R5_Insync, &dev->flags);
3073 if (!test_bit(Faulty, &rdev->flags)) { 3075 if (!test_bit(Faulty, &rdev->flags)) {
3074 s->handle_bad_blocks = 1; 3076 s->handle_bad_blocks = 1;
@@ -3076,7 +3078,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3076 } else 3078 } else
3077 clear_bit(R5_WriteError, &dev->flags); 3079 clear_bit(R5_WriteError, &dev->flags);
3078 } 3080 }
3079 if (test_bit(R5_MadeGood, &dev->flags)) { 3081 if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
3080 if (!test_bit(Faulty, &rdev->flags)) { 3082 if (!test_bit(Faulty, &rdev->flags)) {
3081 s->handle_bad_blocks = 1; 3083 s->handle_bad_blocks = 1;
3082 atomic_inc(&rdev->nr_pending); 3084 atomic_inc(&rdev->nr_pending);
@@ -3110,7 +3112,7 @@ static void handle_stripe(struct stripe_head *sh)
3110 struct r5dev *pdev, *qdev; 3112 struct r5dev *pdev, *qdev;
3111 3113
3112 clear_bit(STRIPE_HANDLE, &sh->state); 3114 clear_bit(STRIPE_HANDLE, &sh->state);
3113 if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) { 3115 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
3114 /* already being handled, ensure it gets handled 3116 /* already being handled, ensure it gets handled
3115 * again when current action finishes */ 3117 * again when current action finishes */
3116 set_bit(STRIPE_HANDLE, &sh->state); 3118 set_bit(STRIPE_HANDLE, &sh->state);
@@ -3159,10 +3161,14 @@ static void handle_stripe(struct stripe_head *sh)
3159 /* check if the array has lost more than max_degraded devices and, 3161 /* check if the array has lost more than max_degraded devices and,
3160 * if so, some requests might need to be failed. 3162 * if so, some requests might need to be failed.
3161 */ 3163 */
3162 if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written) 3164 if (s.failed > conf->max_degraded) {
3163 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); 3165 sh->check_state = 0;
3164 if (s.failed > conf->max_degraded && s.syncing) 3166 sh->reconstruct_state = 0;
3165 handle_failed_sync(conf, sh, &s); 3167 if (s.to_read+s.to_write+s.written)
3168 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3169 if (s.syncing)
3170 handle_failed_sync(conf, sh, &s);
3171 }
3166 3172
3167 /* 3173 /*
3168 * might be able to return some write requests if the parity blocks 3174 * might be able to return some write requests if the parity blocks
@@ -3371,7 +3377,7 @@ finish:
3371 3377
3372 return_io(s.return_bi); 3378 return_io(s.return_bi);
3373 3379
3374 clear_bit(STRIPE_ACTIVE, &sh->state); 3380 clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
3375} 3381}
3376 3382
3377static void raid5_activate_delayed(struct r5conf *conf) 3383static void raid5_activate_delayed(struct r5conf *conf)
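
The handle_stripe() hunk switches STRIPE_ACTIVE from plain test_and_set_bit() /
clear_bit() to the test_and_set_bit_lock() / clear_bit_unlock() pair, which give the
bit acquire/release semantics so the work done while the bit is held is ordered against
the next owner. The idiom in isolation (BUSY_BIT, state and the placeholder comment are
illustrative):

    #include <linux/bitops.h>
    #include <linux/errno.h>

    #define BUSY_BIT 0

    static unsigned long state;

    static int try_handle(void)
    {
        if (test_and_set_bit_lock(BUSY_BIT, &state))
            return -EBUSY;                  /* someone else owns the bit */

        /* ... exclusive section: touch the shared object ... */

        clear_bit_unlock(BUSY_BIT, &state); /* release: publishes our updates */
        return 0;
    }
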
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
index 2e8c288258a9..34434557ef65 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
@@ -398,7 +398,6 @@ static int mxl111sf_i2c_readagain(struct mxl111sf_state *state,
398 u8 i2c_r_data[24]; 398 u8 i2c_r_data[24];
399 u8 i = 0; 399 u8 i = 0;
400 u8 fifo_status = 0; 400 u8 fifo_status = 0;
401 int ret;
402 int status = 0; 401 int status = 0;
403 402
404 mxl_i2c("read %d bytes", count); 403 mxl_i2c("read %d bytes", count);
@@ -418,7 +417,7 @@ static int mxl111sf_i2c_readagain(struct mxl111sf_state *state,
418 i2c_w_data[4+(i*3)] = 0x00; 417 i2c_w_data[4+(i*3)] = 0x00;
419 } 418 }
420 419
421 ret = mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data); 420 mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data);
422 421
423 /* Check for I2C NACK status */ 422 /* Check for I2C NACK status */
424 if (mxl111sf_i2c_check_status(state) == 1) { 423 if (mxl111sf_i2c_check_status(state) == 1) {
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-phy.c b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
index 91dc1fc2825b..b741b3a7a325 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
@@ -296,8 +296,7 @@ int mxl111sf_config_spi(struct mxl111sf_state *state, int onoff)
296 goto fail; 296 goto fail;
297 297
298 ret = mxl111sf_write_reg(state, 0x00, 0x00); 298 ret = mxl111sf_write_reg(state, 0x00, 0x00);
299 if (mxl_fail(ret)) 299 mxl_fail(ret);
300 goto fail;
301fail: 300fail:
302 return ret; 301 return ret;
303} 302}
@@ -328,11 +327,13 @@ int mxl111sf_idac_config(struct mxl111sf_state *state,
328 /* set hysteresis value reg: 0x0B<5:0> */ 327 /* set hysteresis value reg: 0x0B<5:0> */
329 ret = mxl111sf_write_reg(state, V6_IDAC_HYSTERESIS_REG, 328 ret = mxl111sf_write_reg(state, V6_IDAC_HYSTERESIS_REG,
330 (hysteresis_value & 0x3F)); 329 (hysteresis_value & 0x3F));
330 mxl_fail(ret);
331 } 331 }
332 332
333 ret = mxl111sf_write_reg(state, V6_IDAC_SETTINGS_REG, val); 333 ret = mxl111sf_write_reg(state, V6_IDAC_SETTINGS_REG, val);
334 mxl_fail(ret);
334 335
335 return val; 336 return ret;
336} 337}
337 338
338/* 339/*
diff --git a/drivers/media/video/s5k6aa.c b/drivers/media/video/s5k6aa.c
index 2446736b7871..0df7f2a41814 100644
--- a/drivers/media/video/s5k6aa.c
+++ b/drivers/media/video/s5k6aa.c
@@ -19,6 +19,7 @@
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include <linux/media.h> 21#include <linux/media.h>
22#include <linux/module.h>
22#include <linux/regulator/consumer.h> 23#include <linux/regulator/consumer.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24 25
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index 725634d9736d..844a4d7797bc 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -220,8 +220,8 @@ static int vidioc_querycap(struct file *file, void *priv,
220 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); 220 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
221 cap->bus_info[0] = 0; 221 cap->bus_info[0] = 0;
222 cap->version = KERNEL_VERSION(1, 0, 0); 222 cap->version = KERNEL_VERSION(1, 0, 0);
223 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT 223 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
224 | V4L2_CAP_STREAMING; 224 V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING;
225 return 0; 225 return 0;
226} 226}
227 227
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index ecef127dbc66..1e8cdb77d4b8 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -785,8 +785,8 @@ static int vidioc_querycap(struct file *file, void *priv,
785 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); 785 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
786 cap->bus_info[0] = 0; 786 cap->bus_info[0] = 0;
787 cap->version = KERNEL_VERSION(1, 0, 0); 787 cap->version = KERNEL_VERSION(1, 0, 0);
788 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE 788 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE
789 | V4L2_CAP_VIDEO_OUTPUT 789 | V4L2_CAP_VIDEO_OUTPUT_MPLANE
790 | V4L2_CAP_STREAMING; 790 | V4L2_CAP_STREAMING;
791 return 0; 791 return 0;
792} 792}
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 10c2364f3e8a..254d32688843 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -1016,7 +1016,8 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
1016 1016
1017 menu_info = &mapping->menu_info[query_menu->index]; 1017 menu_info = &mapping->menu_info[query_menu->index];
1018 1018
1019 if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) { 1019 if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
1020 (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
1020 s32 bitmap; 1021 s32 bitmap;
1021 1022
1022 if (!ctrl->cached) { 1023 if (!ctrl->cached) {
@@ -1225,7 +1226,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
1225 /* Valid menu indices are reported by the GET_RES request for 1226 /* Valid menu indices are reported by the GET_RES request for
1226 * UVC controls that support it. 1227 * UVC controls that support it.
1227 */ 1228 */
1228 if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) { 1229 if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
1230 (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
1229 if (!ctrl->cached) { 1231 if (!ctrl->cached) {
1230 ret = uvc_ctrl_populate_cache(chain, ctrl); 1232 ret = uvc_ctrl_populate_cache(chain, ctrl);
1231 if (ret < 0) 1233 if (ret < 0)
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index f17f92b86a30..0f415dade05a 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -821,8 +821,8 @@ static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
821 fill_event(&ev, ctrl, changes); 821 fill_event(&ev, ctrl, changes);
822 822
823 list_for_each_entry(sev, &ctrl->ev_subs, node) 823 list_for_each_entry(sev, &ctrl->ev_subs, node)
824 if (sev->fh && (sev->fh != fh || 824 if (sev->fh != fh ||
825 (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))) 825 (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))
826 v4l2_event_queue_fh(sev->fh, &ev); 826 v4l2_event_queue_fh(sev->fh, &ev);
827} 827}
828 828
@@ -947,6 +947,7 @@ static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
947 if (ctrl->cluster[0]->has_volatiles) 947 if (ctrl->cluster[0]->has_volatiles)
948 ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; 948 ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
949 } 949 }
950 fh = NULL;
950 } 951 }
951 if (changed || update_inactive) { 952 if (changed || update_inactive) {
952 /* If a control was changed that was not one of the controls 953 /* If a control was changed that was not one of the controls
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
index 46037f225529..c26ad9637143 100644
--- a/drivers/media/video/v4l2-event.c
+++ b/drivers/media/video/v4l2-event.c
@@ -216,6 +216,9 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
216 unsigned long flags; 216 unsigned long flags;
217 unsigned i; 217 unsigned i;
218 218
219 if (sub->type == V4L2_EVENT_ALL)
220 return -EINVAL;
221
219 if (elems < 1) 222 if (elems < 1)
220 elems = 1; 223 elems = 1;
221 if (sub->type == V4L2_EVENT_CTRL) { 224 if (sub->type == V4L2_EVENT_CTRL) {
@@ -283,6 +286,7 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
283{ 286{
284 struct v4l2_subscribed_event *sev; 287 struct v4l2_subscribed_event *sev;
285 unsigned long flags; 288 unsigned long flags;
289 int i;
286 290
287 if (sub->type == V4L2_EVENT_ALL) { 291 if (sub->type == V4L2_EVENT_ALL) {
288 v4l2_event_unsubscribe_all(fh); 292 v4l2_event_unsubscribe_all(fh);
@@ -293,8 +297,12 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
293 297
294 sev = v4l2_event_subscribed(fh, sub->type, sub->id); 298 sev = v4l2_event_subscribed(fh, sub->type, sub->id);
295 if (sev != NULL) { 299 if (sev != NULL) {
300 /* Remove any pending events for this subscription */
301 for (i = 0; i < sev->in_use; i++) {
302 list_del(&sev->events[sev_pos(sev, i)].list);
303 fh->navailable--;
304 }
296 list_del(&sev->list); 305 list_del(&sev->list);
297 sev->fh = NULL;
298 } 306 }
299 307
300 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 308 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 979e544388cb..95a3f5e82aef 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -131,6 +131,7 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
131 continue; 131 continue;
132 132
133 for (plane = 0; plane < vb->num_planes; ++plane) { 133 for (plane = 0; plane < vb->num_planes; ++plane) {
134 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
134 vb->v4l2_planes[plane].m.mem_offset = off; 135 vb->v4l2_planes[plane].m.mem_offset = off;
135 136
136 dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", 137 dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
@@ -264,6 +265,7 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
264 q->num_buffers -= buffers; 265 q->num_buffers -= buffers;
265 if (!q->num_buffers) 266 if (!q->num_buffers)
266 q->memory = 0; 267 q->memory = 0;
268 INIT_LIST_HEAD(&q->queued_list);
267} 269}
268 270
269/** 271/**
@@ -296,14 +298,14 @@ static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
296{ 298{
297 unsigned int plane; 299 unsigned int plane;
298 for (plane = 0; plane < vb->num_planes; ++plane) { 300 for (plane = 0; plane < vb->num_planes; ++plane) {
301 void *mem_priv = vb->planes[plane].mem_priv;
299 /* 302 /*
300 * If num_users() has not been provided, call_memop 303 * If num_users() has not been provided, call_memop
301 * will return 0, apparently nobody cares about this 304 * will return 0, apparently nobody cares about this
302 * case anyway. If num_users() returns more than 1, 305 * case anyway. If num_users() returns more than 1,
303 * we are not the only user of the plane's memory. 306 * we are not the only user of the plane's memory.
304 */ 307 */
305 if (call_memop(q, plane, num_users, 308 if (mem_priv && call_memop(q, plane, num_users, mem_priv) > 1)
306 vb->planes[plane].mem_priv) > 1)
307 return true; 309 return true;
308 } 310 }
309 return false; 311 return false;
diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c
index 4175544b491b..ec10629a0b0b 100644
--- a/drivers/mfd/ab5500-core.c
+++ b/drivers/mfd/ab5500-core.c
@@ -13,6 +13,7 @@
13 * TODO: Event handling with irq_chip. Waiting for PRCMU fw support. 13 * TODO: Event handling with irq_chip. Waiting for PRCMU fw support.
14 */ 14 */
15 15
16#include <linux/module.h>
16#include <linux/mutex.h> 17#include <linux/mutex.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include <linux/platform_device.h> 19#include <linux/platform_device.h>
diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c
index 6be1fe6b5f9a..43c0ebb81956 100644
--- a/drivers/mfd/ab5500-debugfs.c
+++ b/drivers/mfd/ab5500-debugfs.c
@@ -4,6 +4,7 @@
4 * Debugfs support for the AB5500 MFD driver 4 * Debugfs support for the AB5500 MFD driver
5 */ 5 */
6 6
7#include <linux/export.h>
7#include <linux/debugfs.h> 8#include <linux/debugfs.h>
8#include <linux/seq_file.h> 9#include <linux/seq_file.h>
9#include <linux/mfd/ab5500/ab5500.h> 10#include <linux/mfd/ab5500/ab5500.h>
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index d593878d66d0..5664696f2d3a 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -472,7 +472,7 @@ config BMP085
472 module will be called bmp085. 472 module will be called bmp085.
473 473
474config PCH_PHUB 474config PCH_PHUB
475 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB" 475 tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
476 depends on PCI 476 depends on PCI
477 help 477 help
478 This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of 478 This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
@@ -480,12 +480,13 @@ config PCH_PHUB
480 processor. The Topcliff has MAC address and Option ROM data in SROM. 480 processor. The Topcliff has MAC address and Option ROM data in SROM.
481 This driver can access MAC address and Option ROM data in SROM. 481 This driver can access MAC address and Option ROM data in SROM.
482 482
483 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 483 This driver also can be used for LAPIS Semiconductor's IOH,
484 Output Hub), ML7213 and ML7223. 484 ML7213/ML7223/ML7831.
485 ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is 485 ML7213 which is for IVI(In-Vehicle Infotainment) use.
486 for MP(Media Phone) use. 486 ML7223 IOH is for MP(Media Phone) use.
487 ML7213/ML7223 is companion chip for Intel Atom E6xx series. 487 ML7831 IOH is for general purpose use.
488 ML7213/ML7223 is completely compatible for Intel EG20T PCH. 488 ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
489 ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
489 490
490 To compile this driver as a module, choose M here: the module will 491 To compile this driver as a module, choose M here: the module will
491 be called pch_phub. 492 be called pch_phub.
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index a662f5987b68..82b2cb77ae19 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -100,7 +100,7 @@ enum dpot_devid {
100 AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27), 100 AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
101 AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, 101 AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
102 BRDAC0, 7, 28), 102 BRDAC0, 7, 28),
103 AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, 103 AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
104 BRDAC0, 8, 29), 104 BRDAC0, 8, 29),
105 AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, 105 AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
106 BRDAC0 | BRDAC1, 8, 30), 106 BRDAC0 | BRDAC1, 8, 30),
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index 7ce6065dc20e..eb5cd28bc6d8 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -945,8 +945,7 @@ static int fpga_of_remove(struct platform_device *op)
945/* CTL-CPLD Version Register */ 945/* CTL-CPLD Version Register */
946#define CTL_CPLD_VERSION 0x2000 946#define CTL_CPLD_VERSION 0x2000
947 947
948static int fpga_of_probe(struct platform_device *op, 948static int fpga_of_probe(struct platform_device *op)
949 const struct of_device_id *match)
950{ 949{
951 struct device_node *of_node = op->dev.of_node; 950 struct device_node *of_node = op->dev.of_node;
952 struct device *this_device; 951 struct device *this_device;
@@ -1107,7 +1106,7 @@ static struct of_device_id fpga_of_match[] = {
1107 {}, 1106 {},
1108}; 1107};
1109 1108
1110static struct of_platform_driver fpga_of_driver = { 1109static struct platform_driver fpga_of_driver = {
1111 .probe = fpga_of_probe, 1110 .probe = fpga_of_probe,
1112 .remove = fpga_of_remove, 1111 .remove = fpga_of_remove,
1113 .driver = { 1112 .driver = {
@@ -1124,12 +1123,12 @@ static struct of_platform_driver fpga_of_driver = {
1124static int __init fpga_init(void) 1123static int __init fpga_init(void)
1125{ 1124{
1126 led_trigger_register_simple("fpga", &ledtrig_fpga); 1125 led_trigger_register_simple("fpga", &ledtrig_fpga);
1127 return of_register_platform_driver(&fpga_of_driver); 1126 return platform_driver_register(&fpga_of_driver);
1128} 1127}
1129 1128
1130static void __exit fpga_exit(void) 1129static void __exit fpga_exit(void)
1131{ 1130{
1132 of_unregister_platform_driver(&fpga_of_driver); 1131 platform_driver_unregister(&fpga_of_driver);
1133 led_trigger_unregister_simple(ledtrig_fpga); 1132 led_trigger_unregister_simple(ledtrig_fpga);
1134} 1133}
1135 1134
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 3965821fef17..14e974b2a781 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -1249,8 +1249,7 @@ static bool dma_filter(struct dma_chan *chan, void *data)
1249 return true; 1249 return true;
1250} 1250}
1251 1251
1252static int data_of_probe(struct platform_device *op, 1252static int data_of_probe(struct platform_device *op)
1253 const struct of_device_id *match)
1254{ 1253{
1255 struct device_node *of_node = op->dev.of_node; 1254 struct device_node *of_node = op->dev.of_node;
1256 struct device *this_device; 1255 struct device *this_device;
@@ -1401,7 +1400,7 @@ static struct of_device_id data_of_match[] = {
1401 {}, 1400 {},
1402}; 1401};
1403 1402
1404static struct of_platform_driver data_of_driver = { 1403static struct platform_driver data_of_driver = {
1405 .probe = data_of_probe, 1404 .probe = data_of_probe,
1406 .remove = data_of_remove, 1405 .remove = data_of_remove,
1407 .driver = { 1406 .driver = {
@@ -1417,12 +1416,12 @@ static struct of_platform_driver data_of_driver = {
1417 1416
1418static int __init data_init(void) 1417static int __init data_init(void)
1419{ 1418{
1420 return of_register_platform_driver(&data_of_driver); 1419 return platform_driver_register(&data_of_driver);
1421} 1420}
1422 1421
1423static void __exit data_exit(void) 1422static void __exit data_exit(void)
1424{ 1423{
1425 of_unregister_platform_driver(&data_of_driver); 1424 platform_driver_unregister(&data_of_driver);
1426} 1425}
1427 1426
1428MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); 1427MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 26cf12ca7f50..701edf658970 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -85,7 +85,7 @@ config EEPROM_93XX46
85 85
86config EEPROM_DIGSY_MTC_CFG 86config EEPROM_DIGSY_MTC_CFG
87 bool "DigsyMTC display configuration EEPROMs device" 87 bool "DigsyMTC display configuration EEPROMs device"
88 depends on PPC_MPC5200_GPIO && GPIOLIB && SPI_GPIO 88 depends on GPIO_MPC5200 && SPI_GPIO
89 help 89 help
90 This option enables access to display configuration EEPROMs 90 This option enables access to display configuration EEPROMs
91 on digsy_mtc board. You have to additionally select Microwire 91 on digsy_mtc board. You have to additionally select Microwire
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index dee33addcaeb..10fc4785dba7 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. 2 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
@@ -41,10 +41,10 @@
41#define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset 41#define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset
42 (Intel EG20T PCH)*/ 42 (Intel EG20T PCH)*/
43#define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address 43#define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address
44 offset(OKI SEMICONDUCTOR ML7213) 44 offset(LAPIS Semicon ML7213)
45 */ 45 */
46#define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address 46#define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address
47 offset(OKI SEMICONDUCTOR ML7223) 47 offset(LAPIS Semicon ML7223)
48 */ 48 */
49 49
50/* MAX number of INT_REDUCE_CONTROL registers */ 50/* MAX number of INT_REDUCE_CONTROL registers */
@@ -73,6 +73,9 @@
73#define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */ 73#define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */
74#define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */ 74#define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */
75 75
76/* Macros for ML7831 */
77#define PCI_DEVICE_ID_ROHM_ML7831_PHUB 0x8801
78
76/* SROM ACCESS Macro */ 79/* SROM ACCESS Macro */
77#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1)) 80#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1))
78 81
@@ -115,6 +118,7 @@
115 * @pch_mac_start_address: MAC address area start address 118 * @pch_mac_start_address: MAC address area start address
116 * @pch_opt_rom_start_address: Option ROM start address 119 * @pch_opt_rom_start_address: Option ROM start address
117 * @ioh_type: Save IOH type 120 * @ioh_type: Save IOH type
121 * @pdev: pointer to pci device struct
118 */ 122 */
119struct pch_phub_reg { 123struct pch_phub_reg {
120 u32 phub_id_reg; 124 u32 phub_id_reg;
@@ -136,6 +140,7 @@ struct pch_phub_reg {
136 u32 pch_mac_start_address; 140 u32 pch_mac_start_address;
137 u32 pch_opt_rom_start_address; 141 u32 pch_opt_rom_start_address;
138 int ioh_type; 142 int ioh_type;
143 struct pci_dev *pdev;
139}; 144};
140 145
141/* SROM SPEC for MAC address assignment offset */ 146/* SROM SPEC for MAC address assignment offset */
@@ -471,7 +476,7 @@ static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
471 int retval; 476 int retval;
472 int i; 477 int i;
473 478
474 if (chip->ioh_type == 1) /* EG20T */ 479 if ((chip->ioh_type == 1) || (chip->ioh_type == 5)) /* EG20T or ML7831*/
475 retval = pch_phub_gbe_serial_rom_conf(chip); 480 retval = pch_phub_gbe_serial_rom_conf(chip);
476 else /* ML7223 */ 481 else /* ML7223 */
477 retval = pch_phub_gbe_serial_rom_conf_mp(chip); 482 retval = pch_phub_gbe_serial_rom_conf_mp(chip);
@@ -498,6 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
498 unsigned int orom_size; 503 unsigned int orom_size;
499 int ret; 504 int ret;
500 int err; 505 int err;
506 ssize_t rom_size;
501 507
502 struct pch_phub_reg *chip = 508 struct pch_phub_reg *chip =
503 dev_get_drvdata(container_of(kobj, struct device, kobj)); 509 dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -509,6 +515,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
509 } 515 }
510 516
511 /* Get Rom signature */ 517 /* Get Rom signature */
518 chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
519 if (!chip->pch_phub_extrom_base_address)
520 goto exrom_map_err;
521
512 pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address, 522 pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
513 (unsigned char *)&rom_signature); 523 (unsigned char *)&rom_signature);
514 rom_signature &= 0xff; 524 rom_signature &= 0xff;
@@ -539,10 +549,13 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
539 goto return_err; 549 goto return_err;
540 } 550 }
541return_ok: 551return_ok:
552 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
542 mutex_unlock(&pch_phub_mutex); 553 mutex_unlock(&pch_phub_mutex);
543 return addr_offset; 554 return addr_offset;
544 555
545return_err: 556return_err:
557 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
558exrom_map_err:
546 mutex_unlock(&pch_phub_mutex); 559 mutex_unlock(&pch_phub_mutex);
547return_err_nomutex: 560return_err_nomutex:
548 return err; 561 return err;
@@ -555,6 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
555 int err; 568 int err;
556 unsigned int addr_offset; 569 unsigned int addr_offset;
557 int ret; 570 int ret;
571 ssize_t rom_size;
558 struct pch_phub_reg *chip = 572 struct pch_phub_reg *chip =
559 dev_get_drvdata(container_of(kobj, struct device, kobj)); 573 dev_get_drvdata(container_of(kobj, struct device, kobj));
560 574
@@ -571,6 +585,12 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
571 goto return_ok; 585 goto return_ok;
572 } 586 }
573 587
588 chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
589 if (!chip->pch_phub_extrom_base_address) {
590 err = -ENOMEM;
591 goto exrom_map_err;
592 }
593
574 for (addr_offset = 0; addr_offset < count; addr_offset++) { 594 for (addr_offset = 0; addr_offset < count; addr_offset++) {
575 if (PCH_PHUB_OROM_SIZE < off + addr_offset) 595 if (PCH_PHUB_OROM_SIZE < off + addr_offset)
576 goto return_ok; 596 goto return_ok;
@@ -585,10 +605,14 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
585 } 605 }
586 606
587return_ok: 607return_ok:
608 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
588 mutex_unlock(&pch_phub_mutex); 609 mutex_unlock(&pch_phub_mutex);
589 return addr_offset; 610 return addr_offset;
590 611
591return_err: 612return_err:
613 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
614
615exrom_map_err:
592 mutex_unlock(&pch_phub_mutex); 616 mutex_unlock(&pch_phub_mutex);
593 return err; 617 return err;
594} 618}
@@ -598,8 +622,14 @@ static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr,
598{ 622{
599 u8 mac[8]; 623 u8 mac[8];
600 struct pch_phub_reg *chip = dev_get_drvdata(dev); 624 struct pch_phub_reg *chip = dev_get_drvdata(dev);
625 ssize_t rom_size;
626
627 chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
628 if (!chip->pch_phub_extrom_base_address)
629 return -ENOMEM;
601 630
602 pch_phub_read_gbe_mac_addr(chip, mac); 631 pch_phub_read_gbe_mac_addr(chip, mac);
632 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
603 633
604 return sprintf(buf, "%pM\n", mac); 634 return sprintf(buf, "%pM\n", mac);
605} 635}
@@ -608,6 +638,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
608 const char *buf, size_t count) 638 const char *buf, size_t count)
609{ 639{
610 u8 mac[6]; 640 u8 mac[6];
641 ssize_t rom_size;
611 struct pch_phub_reg *chip = dev_get_drvdata(dev); 642 struct pch_phub_reg *chip = dev_get_drvdata(dev);
612 643
613 if (count != 18) 644 if (count != 18)
@@ -617,7 +648,12 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
617 (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3], 648 (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3],
618 (u32 *)&mac[4], (u32 *)&mac[5]); 649 (u32 *)&mac[4], (u32 *)&mac[5]);
619 650
651 chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
652 if (!chip->pch_phub_extrom_base_address)
653 return -ENOMEM;
654
620 pch_phub_write_gbe_mac_addr(chip, mac); 655 pch_phub_write_gbe_mac_addr(chip, mac);
656 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
621 657
622 return count; 658 return count;
623} 659}
@@ -640,7 +676,6 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
640 int retval; 676 int retval;
641 677
642 int ret; 678 int ret;
643 ssize_t rom_size;
644 struct pch_phub_reg *chip; 679 struct pch_phub_reg *chip;
645 680
646 chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL); 681 chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL);
@@ -677,19 +712,7 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
677 "in pch_phub_base_address variable is %p\n", __func__, 712 "in pch_phub_base_address variable is %p\n", __func__,
678 chip->pch_phub_base_address); 713 chip->pch_phub_base_address);
679 714
680 if (id->driver_data != 3) { 715 chip->pdev = pdev; /* Save pci device struct */
681 chip->pch_phub_extrom_base_address =\
682 pci_map_rom(pdev, &rom_size);
683 if (chip->pch_phub_extrom_base_address == 0) {
684 dev_err(&pdev->dev, "%s: pci_map_rom FAILED", __func__);
685 ret = -ENOMEM;
686 goto err_pci_map;
687 }
688 dev_dbg(&pdev->dev, "%s : "
689 "pci_map_rom SUCCESS and value in "
690 "pch_phub_extrom_base_address variable is %p\n",
691 __func__, chip->pch_phub_extrom_base_address);
692 }
693 716
694 if (id->driver_data == 1) { /* EG20T PCH */ 717 if (id->driver_data == 1) { /* EG20T PCH */
695 const char *board_name; 718 const char *board_name;
@@ -763,6 +786,22 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
763 chip->pch_opt_rom_start_address =\ 786 chip->pch_opt_rom_start_address =\
764 PCH_PHUB_ROM_START_ADDR_ML7223; 787 PCH_PHUB_ROM_START_ADDR_ML7223;
765 chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223; 788 chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
789 } else if (id->driver_data == 5) { /* ML7831 */
790 retval = sysfs_create_file(&pdev->dev.kobj,
791 &dev_attr_pch_mac.attr);
792 if (retval)
793 goto err_sysfs_create;
794
795 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
796 if (retval)
797 goto exit_bin_attr;
798
799 /* set the prefech value */
800 iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
801 /* set the interrupt delay value */
802 iowrite32(0x25, chip->pch_phub_base_address + 0x44);
803 chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
804 chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
766 } 805 }
767 806
768 chip->ioh_type = id->driver_data; 807 chip->ioh_type = id->driver_data;
@@ -773,8 +812,6 @@ exit_bin_attr:
773 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); 812 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
774 813
775err_sysfs_create: 814err_sysfs_create:
776 pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
777err_pci_map:
778 pci_iounmap(pdev, chip->pch_phub_base_address); 815 pci_iounmap(pdev, chip->pch_phub_base_address);
779err_pci_iomap: 816err_pci_iomap:
780 pci_release_regions(pdev); 817 pci_release_regions(pdev);
@@ -792,7 +829,6 @@ static void __devexit pch_phub_remove(struct pci_dev *pdev)
792 829
793 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); 830 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
794 sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr); 831 sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr);
795 pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
796 pci_iounmap(pdev, chip->pch_phub_base_address); 832 pci_iounmap(pdev, chip->pch_phub_base_address);
797 pci_release_regions(pdev); 833 pci_release_regions(pdev);
798 pci_disable_device(pdev); 834 pci_disable_device(pdev);
@@ -847,6 +883,7 @@ static struct pci_device_id pch_phub_pcidev_id[] = {
847 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, }, 883 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, },
848 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, }, 884 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, },
849 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, }, 885 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, },
886 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7831_PHUB), 5, },
850 { } 887 { }
851}; 888};
852MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id); 889MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
@@ -873,5 +910,5 @@ static void __exit pch_phub_pci_exit(void)
873module_init(pch_phub_pci_init); 910module_init(pch_phub_pci_init);
874module_exit(pch_phub_pci_exit); 911module_exit(pch_phub_pci_exit);
875 912
876MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB"); 913MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7223) PHUB");
877MODULE_LICENSE("GPL"); 914MODULE_LICENSE("GPL");
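
The pch_phub changes above stop keeping the option ROM mapped from probe() until
remove(); instead each sysfs access maps it with pci_map_rom(), uses it, and unmaps it
again, returning -ENOMEM when the mapping fails. A hedged sketch of that per-access
pattern (rom_access() and the placeholder comment are not the driver's code):

    #include <linux/pci.h>
    #include <linux/errno.h>

    static int rom_access(struct pci_dev *pdev)
    {
        void __iomem *rom;
        size_t rom_size;
        int ret = 0;

        rom = pci_map_rom(pdev, &rom_size);     /* map the expansion ROM */
        if (!rom)
            return -ENOMEM;                     /* same error the driver reports */

        /* ... read or program the ROM through "rom", up to rom_size bytes ... */

        pci_unmap_rom(pdev, rom);               /* drop the mapping right away */
        return ret;
    }
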
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
index cfbddbef11de..43d073bc1d9c 100644
--- a/drivers/misc/spear13xx_pcie_gadget.c
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -903,6 +903,6 @@ static void __exit spear_pcie_gadget_exit(void)
903} 903}
904module_exit(spear_pcie_gadget_exit); 904module_exit(spear_pcie_gadget_exit);
905 905
906MODULE_ALIAS("pcie-gadget-spear"); 906MODULE_ALIAS("platform:pcie-gadget-spear");
907MODULE_AUTHOR("Pratyush Anand"); 907MODULE_AUTHOR("Pratyush Anand");
908MODULE_LICENSE("GPL"); 908MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a1cb21f95302..1e0e27cbe987 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1606,6 +1606,14 @@ static const struct mmc_fixup blk_fixups[] =
1606 MMC_QUIRK_BLK_NO_CMD23), 1606 MMC_QUIRK_BLK_NO_CMD23),
1607 MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc, 1607 MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1608 MMC_QUIRK_BLK_NO_CMD23), 1608 MMC_QUIRK_BLK_NO_CMD23),
1609
1610 /*
1611 * Some Micron MMC cards needs longer data read timeout than
1612 * indicated in CSD.
1613 */
1614 MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
1615 MMC_QUIRK_LONG_READ_TIME),
1616
1609 END_FIXUP 1617 END_FIXUP
1610}; 1618};
1611 1619
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5278ffb20e74..950b97d7412a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -529,6 +529,18 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
529 data->timeout_clks = 0; 529 data->timeout_clks = 0;
530 } 530 }
531 } 531 }
532
533 /*
534 * Some cards require longer data read timeout than indicated in CSD.
535 * Address this by setting the read timeout to a "reasonably high"
536 * value. For the cards tested, 300ms has proven enough. If necessary,
537 * this value can be increased if other problematic cards require this.
538 */
539 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
540 data->timeout_ns = 300000000;
541 data->timeout_clks = 0;
542 }
543
532 /* 544 /*
533 * Some cards need very high timeouts if driven in SPI mode. 545 * Some cards need very high timeouts if driven in SPI mode.
534 * The worst observed timeout was 900ms after writing a 546 * The worst observed timeout was 900ms after writing a
@@ -1213,6 +1225,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1213 mmc_host_clk_release(host); 1225 mmc_host_clk_release(host);
1214} 1226}
1215 1227
1228static void mmc_poweroff_notify(struct mmc_host *host)
1229{
1230 struct mmc_card *card;
1231 unsigned int timeout;
1232 unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
1233 int err = 0;
1234
1235 card = host->card;
1236
1237 /*
1238 * Send power notify command only if card
1239 * is mmc and notify state is powered ON
1240 */
1241 if (card && mmc_card_mmc(card) &&
1242 (card->poweroff_notify_state == MMC_POWERED_ON)) {
1243
1244 if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
1245 notify_type = EXT_CSD_POWER_OFF_SHORT;
1246 timeout = card->ext_csd.generic_cmd6_time;
1247 card->poweroff_notify_state = MMC_POWEROFF_SHORT;
1248 } else {
1249 notify_type = EXT_CSD_POWER_OFF_LONG;
1250 timeout = card->ext_csd.power_off_longtime;
1251 card->poweroff_notify_state = MMC_POWEROFF_LONG;
1252 }
1253
1254 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1255 EXT_CSD_POWER_OFF_NOTIFICATION,
1256 notify_type, timeout);
1257
1258 if (err && err != -EBADMSG)
1259 pr_err("Device failed to respond within %d poweroff "
1260 "time. Forcefully powering down the device\n",
1261 timeout);
1262
1263 /* Set the card state to no notification after the poweroff */
1264 card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
1265 }
1266}
1267
1216/* 1268/*
1217 * Apply power to the MMC stack. This is a two-stage process. 1269 * Apply power to the MMC stack. This is a two-stage process.
1218 * First, we enable power to the card without the clock running. 1270 * First, we enable power to the card without the clock running.
@@ -1269,42 +1321,12 @@ static void mmc_power_up(struct mmc_host *host)
1269 1321
1270void mmc_power_off(struct mmc_host *host) 1322void mmc_power_off(struct mmc_host *host)
1271{ 1323{
1272 struct mmc_card *card;
1273 unsigned int notify_type;
1274 unsigned int timeout;
1275 int err;
1276
1277 mmc_host_clk_hold(host); 1324 mmc_host_clk_hold(host);
1278 1325
1279 card = host->card;
1280 host->ios.clock = 0; 1326 host->ios.clock = 0;
1281 host->ios.vdd = 0; 1327 host->ios.vdd = 0;
1282 1328
1283 if (card && mmc_card_mmc(card) && 1329 mmc_poweroff_notify(host);
1284 (card->poweroff_notify_state == MMC_POWERED_ON)) {
1285
1286 if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
1287 notify_type = EXT_CSD_POWER_OFF_SHORT;
1288 timeout = card->ext_csd.generic_cmd6_time;
1289 card->poweroff_notify_state = MMC_POWEROFF_SHORT;
1290 } else {
1291 notify_type = EXT_CSD_POWER_OFF_LONG;
1292 timeout = card->ext_csd.power_off_longtime;
1293 card->poweroff_notify_state = MMC_POWEROFF_LONG;
1294 }
1295
1296 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1297 EXT_CSD_POWER_OFF_NOTIFICATION,
1298 notify_type, timeout);
1299
1300 if (err && err != -EBADMSG)
1301 pr_err("Device failed to respond within %d poweroff "
1302 "time. Forcefully powering down the device\n",
1303 timeout);
1304
1305 /* Set the card state to no notification after the poweroff */
1306 card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
1307 }
1308 1330
1309 /* 1331 /*
1310 * Reset ocr mask to be the highest possible voltage supported for 1332 * Reset ocr mask to be the highest possible voltage supported for
@@ -2196,7 +2218,7 @@ int mmc_card_sleep(struct mmc_host *host)
2196 2218
2197 mmc_bus_get(host); 2219 mmc_bus_get(host);
2198 2220
2199 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) 2221 if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
2200 err = host->bus_ops->sleep(host); 2222 err = host->bus_ops->sleep(host);
2201 2223
2202 mmc_bus_put(host); 2224 mmc_bus_put(host);
@@ -2302,8 +2324,17 @@ int mmc_suspend_host(struct mmc_host *host)
2302 * pre-claim the host. 2324 * pre-claim the host.
2303 */ 2325 */
2304 if (mmc_try_claim_host(host)) { 2326 if (mmc_try_claim_host(host)) {
2305 if (host->bus_ops->suspend) 2327 if (host->bus_ops->suspend) {
2328 /*
2329 * For eMMC 4.5 devices, send the notify command
2330 * before sleep, because in sleep state eMMC 4.5
2331 * devices respond only to RESET and AWAKE commands
2332 */
2333 mmc_poweroff_notify(host);
2306 err = host->bus_ops->suspend(host); 2334 err = host->bus_ops->suspend(host);
2335 }
2336 mmc_do_release_host(host);
2337
2307 if (err == -ENOSYS || !host->bus_ops->resume) { 2338 if (err == -ENOSYS || !host->bus_ops->resume) {
2308 /* 2339 /*
2309 * We simply "remove" the card in this case. 2340 * We simply "remove" the card in this case.
@@ -2318,7 +2349,6 @@ int mmc_suspend_host(struct mmc_host *host)
2318 host->pm_flags = 0; 2349 host->pm_flags = 0;
2319 err = 0; 2350 err = 0;
2320 } 2351 }
2321 mmc_do_release_host(host);
2322 } else { 2352 } else {
2323 err = -EBUSY; 2353 err = -EBUSY;
2324 } 2354 }
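
The core.c hunks above move the EXT_CSD power-off notification into a helper, mmc_poweroff_notify(), which is now called from both mmc_power_off() and the suspend path (for eMMC 4.5 parts that only answer RESET/AWAKE once asleep). The short/long choice and its timeout come from host->power_notify_type and the card's ext_csd timings. Below is a minimal standalone sketch of just that selection step; the enum, struct and field names are illustrative stand-ins, not the kernel's MMC types.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's notification constants. */
enum notify_type { POWER_OFF_SHORT, POWER_OFF_LONG };
enum host_notify { PW_NOTIFY_SHORT, PW_NOTIFY_LONG };

struct fake_card {
        unsigned int generic_cmd6_time;   /* ms, used for the short notification */
        unsigned int power_off_longtime;  /* ms, used for the long notification */
};

/* Pick the notification type and timeout the way the helper above does. */
static enum notify_type pick_notify(enum host_notify host_type,
                                    const struct fake_card *card,
                                    unsigned int *timeout)
{
        if (host_type == PW_NOTIFY_SHORT) {
                *timeout = card->generic_cmd6_time;
                return POWER_OFF_SHORT;
        }
        *timeout = card->power_off_longtime;
        return POWER_OFF_LONG;
}

int main(void)
{
        struct fake_card card = { .generic_cmd6_time = 10, .power_off_longtime = 250 };
        unsigned int timeout;
        enum notify_type t = pick_notify(PW_NOTIFY_LONG, &card, &timeout);

        printf("notify=%s timeout=%ums\n",
               t == POWER_OFF_SHORT ? "short" : "long", timeout);
        return 0;
}
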
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index dbf421a6279c..d240427c1246 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -876,17 +876,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
876 * set the notification byte in the ext_csd register of device 876 * set the notification byte in the ext_csd register of device
877 */ 877 */
878 if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) && 878 if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
879 (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) { 879 (card->ext_csd.rev >= 6)) {
880 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 880 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
881 EXT_CSD_POWER_OFF_NOTIFICATION, 881 EXT_CSD_POWER_OFF_NOTIFICATION,
882 EXT_CSD_POWER_ON, 882 EXT_CSD_POWER_ON,
883 card->ext_csd.generic_cmd6_time); 883 card->ext_csd.generic_cmd6_time);
884 if (err && err != -EBADMSG) 884 if (err && err != -EBADMSG)
885 goto free_card; 885 goto free_card;
886 }
887 886
888 if (!err) 887 /*
889 card->poweroff_notify_state = MMC_POWERED_ON; 888 * The err can be -EBADMSG or 0,
889 * so check for success and update the flag
890 */
891 if (!err)
892 card->poweroff_notify_state = MMC_POWERED_ON;
893 }
890 894
891 /* 895 /*
892 * Activate high speed (if supported) 896 * Activate high speed (if supported)
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 325ea61e12d3..8e0fbe994047 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -732,6 +732,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
732 "failed to config DMA channel. Falling back to PIO\n"); 732 "failed to config DMA channel. Falling back to PIO\n");
733 dma_release_channel(host->dma); 733 dma_release_channel(host->dma);
734 host->do_dma = 0; 734 host->do_dma = 0;
735 host->dma = NULL;
735 } 736 }
736 } 737 }
737 738
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 101cd31c8220..d5fe43d53c51 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1010,6 +1010,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
1010 host->data->sg_len, 1010 host->data->sg_len,
1011 omap_hsmmc_get_dma_dir(host, host->data)); 1011 omap_hsmmc_get_dma_dir(host, host->data));
1012 omap_free_dma(dma_ch); 1012 omap_free_dma(dma_ch);
1013 host->data->host_cookie = 0;
1013 } 1014 }
1014 host->data = NULL; 1015 host->data = NULL;
1015} 1016}
@@ -1575,8 +1576,10 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
1575 struct mmc_data *data = mrq->data; 1576 struct mmc_data *data = mrq->data;
1576 1577
1577 if (host->use_dma) { 1578 if (host->use_dma) {
1578 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 1579 if (data->host_cookie)
1579 omap_hsmmc_get_dma_dir(host, data)); 1580 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
1581 data->sg_len,
1582 omap_hsmmc_get_dma_dir(host, data));
1580 data->host_cookie = 0; 1583 data->host_cookie = 0;
1581 } 1584 }
1582} 1585}
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 4b920b7621cf..87b6f079b6e0 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,6 +15,7 @@
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/mmc/host.h> 17#include <linux/mmc/host.h>
18#include <linux/module.h>
18#include <mach/cns3xxx.h> 19#include <mach/cns3xxx.h>
19#include "sdhci-pltfm.h" 20#include "sdhci-pltfm.h"
20 21
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index ae57769ba50d..4b976f00ea85 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -32,6 +32,7 @@
32/* VENDOR SPEC register */ 32/* VENDOR SPEC register */
33#define SDHCI_VENDOR_SPEC 0xC0 33#define SDHCI_VENDOR_SPEC 0xC0
34#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 34#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
35#define SDHCI_WTMK_LVL 0x44
35#define SDHCI_MIX_CTRL 0x48 36#define SDHCI_MIX_CTRL 0x48
36 37
37/* 38/*
@@ -476,6 +477,13 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
476 if (is_imx53_esdhc(imx_data)) 477 if (is_imx53_esdhc(imx_data))
477 imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; 478 imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
478 479
480 /*
481 * The imx6q ROM code will change the default watermark level setting
482 * to something insane. Change it back here.
483 */
484 if (is_imx6q_usdhc(imx_data))
485 writel(0x08100810, host->ioaddr + SDHCI_WTMK_LVL);
486
479 boarddata = &imx_data->boarddata; 487 boarddata = &imx_data->boarddata;
480 if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { 488 if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
481 if (!host->mmc->parent->platform_data) { 489 if (!host->mmc->parent->platform_data) {
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 3d00e722efc9..cb60c4197e0a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -644,8 +644,6 @@ static int sdhci_s3c_resume(struct platform_device *dev)
644static struct platform_driver sdhci_s3c_driver = { 644static struct platform_driver sdhci_s3c_driver = {
645 .probe = sdhci_s3c_probe, 645 .probe = sdhci_s3c_probe,
646 .remove = __devexit_p(sdhci_s3c_remove), 646 .remove = __devexit_p(sdhci_s3c_remove),
647 .suspend = sdhci_s3c_suspend,
648 .resume = sdhci_s3c_resume,
649 .driver = { 647 .driver = {
650 .owner = THIS_MODULE, 648 .owner = THIS_MODULE,
651 .name = "s3c-sdhci", 649 .name = "s3c-sdhci",
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 369366c8e205..d5505f3fe2a1 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -908,7 +908,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
908 if (host->power) { 908 if (host->power) {
909 pm_runtime_put(&host->pd->dev); 909 pm_runtime_put(&host->pd->dev);
910 host->power = false; 910 host->power = false;
911 if (p->down_pwr) 911 if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
912 p->down_pwr(host->pd); 912 p->down_pwr(host->pd);
913 } 913 }
914 host->state = STATE_IDLE; 914 host->state = STATE_IDLE;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index d85a60cda167..4208b3958069 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -798,7 +798,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
798 /* start bus clock */ 798 /* start bus clock */
799 tmio_mmc_clk_start(host); 799 tmio_mmc_clk_start(host);
800 } else if (ios->power_mode != MMC_POWER_UP) { 800 } else if (ios->power_mode != MMC_POWER_UP) {
801 if (host->set_pwr) 801 if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
802 host->set_pwr(host->pdev, 0); 802 host->set_pwr(host->pdev, 0);
803 if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && 803 if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
804 pdata->power) { 804 pdata->power) {
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index 608967fe74c6..736ca10ca9f1 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/module.h>
24#include <linux/mtd/map.h> 25#include <linux/mtd/map.h>
25#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
26#include <linux/mtd/partitions.h> 27#include <linux/mtd/partitions.h>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 583f66cd5bbd..654a5e94e0e7 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -245,6 +245,8 @@ source "drivers/net/ethernet/Kconfig"
245 245
246source "drivers/net/fddi/Kconfig" 246source "drivers/net/fddi/Kconfig"
247 247
248source "drivers/net/hippi/Kconfig"
249
248config NET_SB1000 250config NET_SB1000
249 tristate "General Instruments Surfboard 1000" 251 tristate "General Instruments Surfboard 1000"
250 depends on PNP 252 depends on PNP
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index a73d9dc80ff6..84fb6349a59a 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -4,7 +4,7 @@
4 4
5menuconfig ARCNET 5menuconfig ARCNET
6 depends on NETDEVICES && (ISA || PCI || PCMCIA) 6 depends on NETDEVICES && (ISA || PCI || PCMCIA)
7 bool "ARCnet support" 7 tristate "ARCnet support"
8 ---help--- 8 ---help---
9 If you have a network card of this type, say Y and check out the 9 If you have a network card of this type, say Y and check out the
10 (arguably) beautiful poetry in 10 (arguably) beautiful poetry in
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b0c577256487..7f8756825b8a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2553,30 +2553,6 @@ re_arm:
2553 } 2553 }
2554} 2554}
2555 2555
2556static __be32 bond_glean_dev_ip(struct net_device *dev)
2557{
2558 struct in_device *idev;
2559 struct in_ifaddr *ifa;
2560 __be32 addr = 0;
2561
2562 if (!dev)
2563 return 0;
2564
2565 rcu_read_lock();
2566 idev = __in_dev_get_rcu(dev);
2567 if (!idev)
2568 goto out;
2569
2570 ifa = idev->ifa_list;
2571 if (!ifa)
2572 goto out;
2573
2574 addr = ifa->ifa_local;
2575out:
2576 rcu_read_unlock();
2577 return addr;
2578}
2579
2580static int bond_has_this_ip(struct bonding *bond, __be32 ip) 2556static int bond_has_this_ip(struct bonding *bond, __be32 ip)
2581{ 2557{
2582 struct vlan_entry *vlan; 2558 struct vlan_entry *vlan;
@@ -3322,6 +3298,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3322 struct bonding *bond; 3298 struct bonding *bond;
3323 struct vlan_entry *vlan; 3299 struct vlan_entry *vlan;
3324 3300
3301 /* we only care about primary address */
3302 if(ifa->ifa_flags & IFA_F_SECONDARY)
3303 return NOTIFY_DONE;
3304
3325 list_for_each_entry(bond, &bn->dev_list, bond_list) { 3305 list_for_each_entry(bond, &bn->dev_list, bond_list) {
3326 if (bond->dev == event_dev) { 3306 if (bond->dev == event_dev) {
3327 switch (event) { 3307 switch (event) {
@@ -3329,7 +3309,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3329 bond->master_ip = ifa->ifa_local; 3309 bond->master_ip = ifa->ifa_local;
3330 return NOTIFY_OK; 3310 return NOTIFY_OK;
3331 case NETDEV_DOWN: 3311 case NETDEV_DOWN:
3332 bond->master_ip = bond_glean_dev_ip(bond->dev); 3312 bond->master_ip = 0;
3333 return NOTIFY_OK; 3313 return NOTIFY_OK;
3334 default: 3314 default:
3335 return NOTIFY_DONE; 3315 return NOTIFY_DONE;
@@ -3345,8 +3325,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3345 vlan->vlan_ip = ifa->ifa_local; 3325 vlan->vlan_ip = ifa->ifa_local;
3346 return NOTIFY_OK; 3326 return NOTIFY_OK;
3347 case NETDEV_DOWN: 3327 case NETDEV_DOWN:
3348 vlan->vlan_ip = 3328 vlan->vlan_ip = 0;
3349 bond_glean_dev_ip(vlan_dev);
3350 return NOTIFY_OK; 3329 return NOTIFY_OK;
3351 default: 3330 default:
3352 return NOTIFY_DONE; 3331 return NOTIFY_DONE;
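
The bond_main.c hunk above deletes bond_glean_dev_ip(): on NETDEV_DOWN the cached master/VLAN address is simply cleared, and the inetaddr notifier now bails out early for secondary addresses so only the primary address is ever cached. A small standalone sketch of that filter-and-cache behaviour; the struct and event names are invented for the example, though IFA_F_SECONDARY itself is the kernel's flag.

#include <stdio.h>

#define IFA_F_SECONDARY 0x01  /* kernel uapi value; used here for illustration */

struct ifaddr_event {
        unsigned int flags;
        unsigned int addr;    /* IPv4 address, host order for the example */
};

enum { EV_UP, EV_DOWN };

/* Cache only primary addresses; forget the cache on NETDEV_DOWN. */
static void handle_event(int event, const struct ifaddr_event *ifa,
                         unsigned int *cached_ip)
{
        if (ifa->flags & IFA_F_SECONDARY)
                return;                    /* secondary addresses are ignored */

        if (event == EV_UP)
                *cached_ip = ifa->addr;
        else if (event == EV_DOWN)
                *cached_ip = 0;            /* no re-glean, just drop it */
}

int main(void)
{
        unsigned int master_ip = 0;
        struct ifaddr_event primary   = { 0, 0xc0a80001 };              /* 192.168.0.1 */
        struct ifaddr_event secondary = { IFA_F_SECONDARY, 0xc0a80002 };

        handle_event(EV_UP, &primary, &master_ip);
        handle_event(EV_UP, &secondary, &master_ip);   /* ignored */
        printf("cached ip = %#x\n", master_ip);

        handle_event(EV_DOWN, &primary, &master_ip);
        printf("cached ip after down = %#x\n", master_ip);
        return 0;
}
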
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5a20804fdece..4ef7e2fd9fe6 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -319,6 +319,13 @@ static ssize_t bonding_store_mode(struct device *d,
319 goto out; 319 goto out;
320 } 320 }
321 321
322 if (bond->slave_cnt > 0) {
323 pr_err("unable to update mode of %s because it has slaves.\n",
324 bond->dev->name);
325 ret = -EPERM;
326 goto out;
327 }
328
322 new_value = bond_parse_parm(buf, bond_mode_tbl); 329 new_value = bond_parse_parm(buf, bond_mode_tbl);
323 if (new_value < 0) { 330 if (new_value < 0) {
324 pr_err("%s: Ignoring invalid mode value %.*s.\n", 331 pr_err("%s: Ignoring invalid mode value %.*s.\n",
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 905bce0b3a43..2c7f5036f570 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -20,7 +20,6 @@
20 */ 20 */
21 21
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/version.h>
24#include <linux/module.h> 23#include <linux/module.h>
25#include <linux/interrupt.h> 24#include <linux/interrupt.h>
26#include <linux/netdevice.h> 25#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 4cf835dbc122..3fb66d09ece5 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp)
608 skb->len, 608 skb->len,
609 DMA_TO_DEVICE); 609 DMA_TO_DEVICE);
610 rp->skb = NULL; 610 rp->skb = NULL;
611 dev_kfree_skb(skb); 611 dev_kfree_skb_irq(skb);
612 } 612 }
613 613
614 bp->tx_cons = cons; 614 bp->tx_cons = cons;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index bce203fa4b9e..882f48f0a03c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -10327,6 +10327,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10327 return 0; 10327 return 0;
10328} 10328}
10329 10329
10330
10331static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
10332 struct link_params *params, u8 mode)
10333{
10334 struct bnx2x *bp = params->bp;
10335 u16 temp;
10336
10337 bnx2x_cl22_write(bp, phy,
10338 MDIO_REG_GPHY_SHADOW,
10339 MDIO_REG_GPHY_SHADOW_LED_SEL1);
10340 bnx2x_cl22_read(bp, phy,
10341 MDIO_REG_GPHY_SHADOW,
10342 &temp);
10343 temp &= 0xff00;
10344
10345 DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
10346 switch (mode) {
10347 case LED_MODE_FRONT_PANEL_OFF:
10348 case LED_MODE_OFF:
10349 temp |= 0x00ee;
10350 break;
10351 case LED_MODE_OPER:
10352 temp |= 0x0001;
10353 break;
10354 case LED_MODE_ON:
10355 temp |= 0x00ff;
10356 break;
10357 default:
10358 break;
10359 }
10360 bnx2x_cl22_write(bp, phy,
10361 MDIO_REG_GPHY_SHADOW,
10362 MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
10363 return;
10364}
10365
10366
10330static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, 10367static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
10331 struct link_params *params) 10368 struct link_params *params)
10332{ 10369{
@@ -11103,7 +11140,7 @@ static struct bnx2x_phy phy_54618se = {
11103 .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback, 11140 .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
11104 .format_fw_ver = (format_fw_ver_t)NULL, 11141 .format_fw_ver = (format_fw_ver_t)NULL,
11105 .hw_reset = (hw_reset_t)NULL, 11142 .hw_reset = (hw_reset_t)NULL,
11106 .set_link_led = (set_link_led_t)NULL, 11143 .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
11107 .phy_specific_func = (phy_specific_func_t)NULL 11144 .phy_specific_func = (phy_specific_func_t)NULL
11108}; 11145};
11109/*****************************************************************/ 11146/*****************************************************************/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6486ab8c8fc8..2f6361e949f0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10548,33 +10548,38 @@ do { \
10548 10548
10549int bnx2x_init_firmware(struct bnx2x *bp) 10549int bnx2x_init_firmware(struct bnx2x *bp)
10550{ 10550{
10551 const char *fw_file_name;
10552 struct bnx2x_fw_file_hdr *fw_hdr; 10551 struct bnx2x_fw_file_hdr *fw_hdr;
10553 int rc; 10552 int rc;
10554 10553
10555 if (CHIP_IS_E1(bp))
10556 fw_file_name = FW_FILE_NAME_E1;
10557 else if (CHIP_IS_E1H(bp))
10558 fw_file_name = FW_FILE_NAME_E1H;
10559 else if (!CHIP_IS_E1x(bp))
10560 fw_file_name = FW_FILE_NAME_E2;
10561 else {
10562 BNX2X_ERR("Unsupported chip revision\n");
10563 return -EINVAL;
10564 }
10565 10554
10566 BNX2X_DEV_INFO("Loading %s\n", fw_file_name); 10555 if (!bp->firmware) {
10556 const char *fw_file_name;
10567 10557
10568 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); 10558 if (CHIP_IS_E1(bp))
10569 if (rc) { 10559 fw_file_name = FW_FILE_NAME_E1;
10570 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name); 10560 else if (CHIP_IS_E1H(bp))
10571 goto request_firmware_exit; 10561 fw_file_name = FW_FILE_NAME_E1H;
10572 } 10562 else if (!CHIP_IS_E1x(bp))
10563 fw_file_name = FW_FILE_NAME_E2;
10564 else {
10565 BNX2X_ERR("Unsupported chip revision\n");
10566 return -EINVAL;
10567 }
10568 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
10573 10569
10574 rc = bnx2x_check_firmware(bp); 10570 rc = request_firmware(&bp->firmware, fw_file_name,
10575 if (rc) { 10571 &bp->pdev->dev);
10576 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); 10572 if (rc) {
10577 goto request_firmware_exit; 10573 BNX2X_ERR("Can't load firmware file %s\n",
10574 fw_file_name);
10575 goto request_firmware_exit;
10576 }
10577
10578 rc = bnx2x_check_firmware(bp);
10579 if (rc) {
10580 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
10581 goto request_firmware_exit;
10582 }
10578 } 10583 }
10579 10584
10580 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; 10585 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
@@ -10630,6 +10635,7 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
10630 kfree(bp->init_ops); 10635 kfree(bp->init_ops);
10631 kfree(bp->init_data); 10636 kfree(bp->init_data);
10632 release_firmware(bp->firmware); 10637 release_firmware(bp->firmware);
10638 bp->firmware = NULL;
10633} 10639}
10634 10640
10635 10641
@@ -10925,6 +10931,8 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10925 if (bp->doorbells) 10931 if (bp->doorbells)
10926 iounmap(bp->doorbells); 10932 iounmap(bp->doorbells);
10927 10933
10934 bnx2x_release_firmware(bp);
10935
10928 bnx2x_free_mem_bp(bp); 10936 bnx2x_free_mem_bp(bp);
10929 10937
10930 free_netdev(dev); 10938 free_netdev(dev);
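
bnx2x_init_firmware() above becomes idempotent: the firmware blob is requested only while bp->firmware is still NULL, and bnx2x_release_firmware() clears the pointer (and is now also called from remove), so a later initialisation can either reuse the cached copy or re-request it. A minimal userspace sketch of the same load-once/cache/clear pattern, with invented names in place of the driver's API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical cached blob, standing in for bp->firmware. */
static char *cached_fw;

/* Pretend loader: in the driver this would be request_firmware(). */
static char *load_blob(const char *name)
{
        printf("loading %s from disk\n", name);
        return strdup(name);
}

/* Load once; subsequent calls reuse the cached copy. */
static int init_firmware(const char *name)
{
        if (!cached_fw) {
                cached_fw = load_blob(name);
                if (!cached_fw)
                        return -1;
        }
        /* parse/validate cached_fw here */
        return 0;
}

/* Release and clear the pointer so the next init reloads. */
static void release_firmware_blob(void)
{
        free(cached_fw);
        cached_fw = NULL;
}

int main(void)
{
        init_firmware("fw-e2.bin");   /* loads */
        init_firmware("fw-e2.bin");   /* reuses the cache, no second load */
        release_firmware_blob();
        init_firmware("fw-e2.bin");   /* loads again after release */
        release_firmware_blob();
        return 0;
}
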
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index fc7bd0f23c0b..e58073ef33b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -6990,6 +6990,7 @@ Theotherbitsarereservedandshouldbezero*/
6990#define MDIO_REG_INTR_MASK 0x1b 6990#define MDIO_REG_INTR_MASK 0x1b
6991#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1) 6991#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
6992#define MDIO_REG_GPHY_SHADOW 0x1c 6992#define MDIO_REG_GPHY_SHADOW 0x1c
6993#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
6993#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10) 6994#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
6994#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15) 6995#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
6995#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10) 6996#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 0440425c83d6..14517691f8db 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -5380,7 +5380,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
5380 rc = drv->init_fw(bp); 5380 rc = drv->init_fw(bp);
5381 if (rc) { 5381 if (rc) {
5382 BNX2X_ERR("Error loading firmware\n"); 5382 BNX2X_ERR("Error loading firmware\n");
5383 goto fw_init_err; 5383 goto init_err;
5384 } 5384 }
5385 5385
5386 /* Handle the beginning of COMMON_XXX phases separately... */ 5386 /* Handle the beginning of COMMON_XXX phases separately... */
@@ -5388,25 +5388,25 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
5388 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 5388 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5389 rc = bnx2x_func_init_cmn_chip(bp, drv); 5389 rc = bnx2x_func_init_cmn_chip(bp, drv);
5390 if (rc) 5390 if (rc)
5391 goto init_hw_err; 5391 goto init_err;
5392 5392
5393 break; 5393 break;
5394 case FW_MSG_CODE_DRV_LOAD_COMMON: 5394 case FW_MSG_CODE_DRV_LOAD_COMMON:
5395 rc = bnx2x_func_init_cmn(bp, drv); 5395 rc = bnx2x_func_init_cmn(bp, drv);
5396 if (rc) 5396 if (rc)
5397 goto init_hw_err; 5397 goto init_err;
5398 5398
5399 break; 5399 break;
5400 case FW_MSG_CODE_DRV_LOAD_PORT: 5400 case FW_MSG_CODE_DRV_LOAD_PORT:
5401 rc = bnx2x_func_init_port(bp, drv); 5401 rc = bnx2x_func_init_port(bp, drv);
5402 if (rc) 5402 if (rc)
5403 goto init_hw_err; 5403 goto init_err;
5404 5404
5405 break; 5405 break;
5406 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 5406 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5407 rc = bnx2x_func_init_func(bp, drv); 5407 rc = bnx2x_func_init_func(bp, drv);
5408 if (rc) 5408 if (rc)
5409 goto init_hw_err; 5409 goto init_err;
5410 5410
5411 break; 5411 break;
5412 default: 5412 default:
@@ -5414,10 +5414,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
5414 rc = -EINVAL; 5414 rc = -EINVAL;
5415 } 5415 }
5416 5416
5417init_hw_err: 5417init_err:
5418 drv->release_fw(bp);
5419
5420fw_init_err:
5421 drv->gunzip_end(bp); 5418 drv->gunzip_end(bp);
5422 5419
5423 /* In case of success, complete the command immediately: no ramrods 5420 /* In case of success, complete the command immediately: no ramrods
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 98849a1fc749..b48378a41e49 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -7,6 +7,7 @@ config HAVE_NET_MACB
7 7
8config NET_ATMEL 8config NET_ATMEL
9 bool "Atmel devices" 9 bool "Atmel devices"
10 default y
10 depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200) 11 depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
11 ---help--- 12 ---help---
12 If you have a network (Ethernet) card belonging to this class, say Y. 13 If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 438f4580bf66..2a22f5256353 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -613,7 +613,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
613 613
614 if (!dm->wake_state) 614 if (!dm->wake_state)
615 irq_set_irq_wake(dm->irq_wake, 1); 615 irq_set_irq_wake(dm->irq_wake, 1);
616 else if (dm->wake_state & !opts) 616 else if (dm->wake_state && !opts)
617 irq_set_irq_wake(dm->irq_wake, 0); 617 irq_set_irq_wake(dm->irq_wake, 0);
618 } 618 }
619 619
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index c520cfd3b298..5272f9d4dda9 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -24,6 +24,7 @@ config FEC
24 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" 24 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
25 depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ 25 depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
26 ARCH_MXC || ARCH_MXS) 26 ARCH_MXC || ARCH_MXS)
27 default ARCH_MXC || ARCH_MXS if ARM
27 select PHYLIB 28 select PHYLIB
28 ---help--- 29 ---help---
29 Say Y here if you want to use the built-in 10/100 Fast ethernet 30 Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 1124ce0a1594..c136230d50bb 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -232,6 +232,7 @@ struct fec_enet_private {
232 struct platform_device *pdev; 232 struct platform_device *pdev;
233 233
234 int opened; 234 int opened;
235 int dev_id;
235 236
236 /* Phylib and MDIO interface */ 237 /* Phylib and MDIO interface */
237 struct mii_bus *mii_bus; 238 struct mii_bus *mii_bus;
@@ -837,7 +838,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
837 838
838 /* Adjust MAC if using macaddr */ 839 /* Adjust MAC if using macaddr */
839 if (iap == macaddr) 840 if (iap == macaddr)
840 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id; 841 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
841} 842}
842 843
843/* ------------------------------------------------------------------------- */ 844/* ------------------------------------------------------------------------- */
@@ -953,7 +954,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
953 char mdio_bus_id[MII_BUS_ID_SIZE]; 954 char mdio_bus_id[MII_BUS_ID_SIZE];
954 char phy_name[MII_BUS_ID_SIZE + 3]; 955 char phy_name[MII_BUS_ID_SIZE + 3];
955 int phy_id; 956 int phy_id;
956 int dev_id = fep->pdev->id; 957 int dev_id = fep->dev_id;
957 958
958 fep->phy_dev = NULL; 959 fep->phy_dev = NULL;
959 960
@@ -1031,7 +1032,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1031 * mdio interface in board design, and need to be configured by 1032 * mdio interface in board design, and need to be configured by
1032 * fec0 mii_bus. 1033 * fec0 mii_bus.
1033 */ 1034 */
1034 if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) { 1035 if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
1035 /* fec1 uses fec0 mii_bus */ 1036 /* fec1 uses fec0 mii_bus */
1036 fep->mii_bus = fec0_mii_bus; 1037 fep->mii_bus = fec0_mii_bus;
1037 return 0; 1038 return 0;
@@ -1063,7 +1064,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1063 fep->mii_bus->read = fec_enet_mdio_read; 1064 fep->mii_bus->read = fec_enet_mdio_read;
1064 fep->mii_bus->write = fec_enet_mdio_write; 1065 fep->mii_bus->write = fec_enet_mdio_write;
1065 fep->mii_bus->reset = fec_enet_mdio_reset; 1066 fep->mii_bus->reset = fec_enet_mdio_reset;
1066 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1); 1067 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
1067 fep->mii_bus->priv = fep; 1068 fep->mii_bus->priv = fep;
1068 fep->mii_bus->parent = &pdev->dev; 1069 fep->mii_bus->parent = &pdev->dev;
1069 1070
@@ -1521,6 +1522,7 @@ fec_probe(struct platform_device *pdev)
1521 int i, irq, ret = 0; 1522 int i, irq, ret = 0;
1522 struct resource *r; 1523 struct resource *r;
1523 const struct of_device_id *of_id; 1524 const struct of_device_id *of_id;
1525 static int dev_id;
1524 1526
1525 of_id = of_match_device(fec_dt_ids, &pdev->dev); 1527 of_id = of_match_device(fec_dt_ids, &pdev->dev);
1526 if (of_id) 1528 if (of_id)
@@ -1548,6 +1550,7 @@ fec_probe(struct platform_device *pdev)
1548 1550
1549 fep->hwp = ioremap(r->start, resource_size(r)); 1551 fep->hwp = ioremap(r->start, resource_size(r));
1550 fep->pdev = pdev; 1552 fep->pdev = pdev;
1553 fep->dev_id = dev_id++;
1551 1554
1552 if (!fep->hwp) { 1555 if (!fep->hwp) {
1553 ret = -ENOMEM; 1556 ret = -ENOMEM;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 52f4e8ad48e7..4d9f84b8ab97 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -183,28 +183,10 @@ void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
183} 183}
184EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name); 184EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
185 185
186/* Scan the bus in reverse, looking for an empty spot */
187static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
188{
189 int i;
190
191 for (i = PHY_MAX_ADDR; i > 0; i--) {
192 u32 phy_id;
193
194 if (get_phy_id(new_bus, i, &phy_id))
195 return -1;
196
197 if (phy_id == 0xffffffff)
198 break;
199 }
200
201 return i;
202}
203
204 186
205#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
206static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np) 187static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
207{ 188{
189#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
208 struct gfar __iomem *enet_regs; 190 struct gfar __iomem *enet_regs;
209 191
210 /* 192 /*
@@ -220,15 +202,15 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
220 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") || 202 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
221 of_device_is_compatible(np, "fsl,etsec2-tbi")) { 203 of_device_is_compatible(np, "fsl,etsec2-tbi")) {
222 return of_iomap(np, 1); 204 return of_iomap(np, 1);
223 } else 205 }
224 return NULL;
225}
226#endif 206#endif
207 return NULL;
208}
227 209
228 210
229#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
230static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) 211static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
231{ 212{
213#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
232 struct device_node *np = NULL; 214 struct device_node *np = NULL;
233 int err = 0; 215 int err = 0;
234 216
@@ -261,9 +243,10 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
261 return err; 243 return err;
262 else 244 else
263 return -EINVAL; 245 return -EINVAL;
264} 246#else
247 return -ENODEV;
265#endif 248#endif
266 249}
267 250
268static int fsl_pq_mdio_probe(struct platform_device *ofdev) 251static int fsl_pq_mdio_probe(struct platform_device *ofdev)
269{ 252{
@@ -339,19 +322,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
339 of_device_is_compatible(np, "fsl,etsec2-mdio") || 322 of_device_is_compatible(np, "fsl,etsec2-mdio") ||
340 of_device_is_compatible(np, "fsl,etsec2-tbi") || 323 of_device_is_compatible(np, "fsl,etsec2-tbi") ||
341 of_device_is_compatible(np, "gianfar")) { 324 of_device_is_compatible(np, "gianfar")) {
342#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
343 tbipa = get_gfar_tbipa(regs, np); 325 tbipa = get_gfar_tbipa(regs, np);
344 if (!tbipa) { 326 if (!tbipa) {
345 err = -EINVAL; 327 err = -EINVAL;
346 goto err_free_irqs; 328 goto err_free_irqs;
347 } 329 }
348#else
349 err = -ENODEV;
350 goto err_free_irqs;
351#endif
352 } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || 330 } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
353 of_device_is_compatible(np, "ucc_geth_phy")) { 331 of_device_is_compatible(np, "ucc_geth_phy")) {
354#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
355 u32 id; 332 u32 id;
356 static u32 mii_mng_master; 333 static u32 mii_mng_master;
357 334
@@ -364,10 +341,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
364 mii_mng_master = id; 341 mii_mng_master = id;
365 ucc_set_qe_mux_mii_mng(id - 1); 342 ucc_set_qe_mux_mii_mng(id - 1);
366 } 343 }
367#else
368 err = -ENODEV;
369 goto err_free_irqs;
370#endif
371 } else { 344 } else {
372 err = -ENODEV; 345 err = -ENODEV;
373 goto err_free_irqs; 346 goto err_free_irqs;
@@ -386,16 +359,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
386 } 359 }
387 360
388 if (tbiaddr == -1) { 361 if (tbiaddr == -1) {
389 out_be32(tbipa, 0);
390
391 tbiaddr = fsl_pq_mdio_find_free(new_bus);
392 }
393
394 /*
395 * We define TBIPA at 0 to be illegal, opting to fail for boards that
396 * have PHYs at 1-31, rather than change tbipa and rescan.
397 */
398 if (tbiaddr == 0) {
399 err = -EBUSY; 362 err = -EBUSY;
400 363
401 goto err_free_irqs; 364 goto err_free_irqs;
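
The fsl_pq_mdio rework above pushes the CONFIG_GIANFAR / CONFIG_UCC_GETH guards inside get_gfar_tbipa() and get_ucc_id_for_range(): the helpers are always compiled and return NULL or -ENODEV when the support is configured out, so the probe path loses its inline #ifdef blocks. A standalone sketch of that pattern with a made-up CONFIG macro:

#include <stdio.h>
#include <stddef.h>

/* Toggle this to see both build configurations. */
#define CONFIG_EXAMPLE_FEATURE 1

/*
 * The helper is always defined; the #if lives inside it, so callers
 * never need their own conditional compilation.
 */
static int *get_feature_regs(void)
{
#if defined(CONFIG_EXAMPLE_FEATURE)
        static int regs[4];
        return regs;
#else
        return NULL;
#endif
}

int main(void)
{
        int *regs = get_feature_regs();

        if (!regs) {
                fprintf(stderr, "feature not built in\n");
                return 1;
        }
        regs[0] = 42;
        printf("regs[0] = %d\n", regs[0]);
        return 0;
}
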
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 410d6a1984ed..6650068c996c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -61,9 +61,9 @@
61#ifdef EHEA_SMALL_QUEUES 61#ifdef EHEA_SMALL_QUEUES
62#define EHEA_MAX_CQE_COUNT 1023 62#define EHEA_MAX_CQE_COUNT 1023
63#define EHEA_DEF_ENTRIES_SQ 1023 63#define EHEA_DEF_ENTRIES_SQ 1023
64#define EHEA_DEF_ENTRIES_RQ1 4095 64#define EHEA_DEF_ENTRIES_RQ1 1023
65#define EHEA_DEF_ENTRIES_RQ2 1023 65#define EHEA_DEF_ENTRIES_RQ2 1023
66#define EHEA_DEF_ENTRIES_RQ3 1023 66#define EHEA_DEF_ENTRIES_RQ3 511
67#else 67#else
68#define EHEA_MAX_CQE_COUNT 4080 68#define EHEA_MAX_CQE_COUNT 4080
69#define EHEA_DEF_ENTRIES_SQ 4080 69#define EHEA_DEF_ENTRIES_SQ 4080
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 37b70f7052b6..bfeccbfde236 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work)
371out_herr: 371out_herr:
372 free_page((unsigned long)cb2); 372 free_page((unsigned long)cb2);
373resched: 373resched:
374 schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000)); 374 schedule_delayed_work(&port->stats_work,
375 round_jiffies_relative(msecs_to_jiffies(1000)));
375} 376}
376 377
377static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) 378static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -2434,7 +2435,8 @@ static int ehea_open(struct net_device *dev)
2434 } 2435 }
2435 2436
2436 mutex_unlock(&port->port_lock); 2437 mutex_unlock(&port->port_lock);
2437 schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000)); 2438 schedule_delayed_work(&port->stats_work,
2439 round_jiffies_relative(msecs_to_jiffies(1000)));
2438 2440
2439 return ret; 2441 return ret;
2440} 2442}
diff --git a/drivers/net/ethernet/ibm/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c
index 4326681df382..acc31af6594a 100644
--- a/drivers/net/ethernet/ibm/iseries_veth.c
+++ b/drivers/net/ethernet/ibm/iseries_veth.c
@@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
1421 1421
1422 /* FIXME: do we need this? */ 1422 /* FIXME: do we need this? */
1423 memset(local_list, 0, sizeof(local_list)); 1423 memset(local_list, 0, sizeof(local_list));
1424 memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG)); 1424 memset(remote_list, 0, sizeof(remote_list));
1425 1425
1426 /* a 0 address marks the end of the valid entries */ 1426 /* a 0 address marks the end of the valid entries */
1427 if (senddata->addr[startchunk] == 0) 1427 if (senddata->addr[startchunk] == 0)
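
The iseries_veth change above fixes a classic memset mistake: `sizeof(VETH_MAX_FRAMES_PER_MSG)` is the size of an integer constant, not of the array, so only the first few bytes of remote_list were being cleared. A standalone illustration; the array length and element type here are made up for the example:

#include <stdio.h>
#include <string.h>

#define MAX_FRAMES_PER_MSG 6   /* stand-in for VETH_MAX_FRAMES_PER_MSG */

int main(void)
{
        unsigned long remote_list[MAX_FRAMES_PER_MSG];

        memset(remote_list, 0xff, sizeof(remote_list));

        /* Buggy form: clears sizeof(int) bytes, i.e. part of one element. */
        memset(remote_list, 0, sizeof(MAX_FRAMES_PER_MSG));
        printf("after buggy memset, last element = %#lx\n",
               remote_list[MAX_FRAMES_PER_MSG - 1]);

        /* Correct form: clears the whole array. */
        memset(remote_list, 0, sizeof(remote_list));
        printf("after fixed memset, last element = %#lx\n",
               remote_list[MAX_FRAMES_PER_MSG - 1]);
        return 0;
}
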
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 7becff1f387d..76b84573566b 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1745,6 +1745,112 @@ jme_phy_off(struct jme_adapter *jme)
1745} 1745}
1746 1746
1747static int 1747static int
1748jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
1749{
1750 u32 phy_addr;
1751
1752 phy_addr = JM_PHY_SPEC_REG_READ | specreg;
1753 jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
1754 phy_addr);
1755 return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
1756 JM_PHY_SPEC_DATA_REG);
1757}
1758
1759static void
1760jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
1761{
1762 u32 phy_addr;
1763
1764 phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
1765 jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
1766 phy_data);
1767 jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
1768 phy_addr);
1769}
1770
1771static int
1772jme_phy_calibration(struct jme_adapter *jme)
1773{
1774 u32 ctrl1000, phy_data;
1775
1776 jme_phy_off(jme);
1777 jme_phy_on(jme);
1778 /* Enable PHY test mode 1 */
1779 ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
1780 ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
1781 ctrl1000 |= PHY_GAD_TEST_MODE_1;
1782 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
1783
1784 phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
1785 phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
1786 phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
1787 JM_PHY_EXT_COMM_2_CALI_ENABLE;
1788 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
1789 msleep(20);
1790 phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
1791 phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
1792 JM_PHY_EXT_COMM_2_CALI_MODE_0 |
1793 JM_PHY_EXT_COMM_2_CALI_LATCH);
1794 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
1795
1796 /* Disable PHY test mode */
1797 ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
1798 ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
1799 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
1800 return 0;
1801}
1802
1803static int
1804jme_phy_setEA(struct jme_adapter *jme)
1805{
1806 u32 phy_comm0 = 0, phy_comm1 = 0;
1807 u8 nic_ctrl;
1808
1809 pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
1810 if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
1811 return 0;
1812
1813 switch (jme->pdev->device) {
1814 case PCI_DEVICE_ID_JMICRON_JMC250:
1815 if (((jme->chip_main_rev == 5) &&
1816 ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
1817 (jme->chip_sub_rev == 3))) ||
1818 (jme->chip_main_rev >= 6)) {
1819 phy_comm0 = 0x008A;
1820 phy_comm1 = 0x4109;
1821 }
1822 if ((jme->chip_main_rev == 3) &&
1823 ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
1824 phy_comm0 = 0xE088;
1825 break;
1826 case PCI_DEVICE_ID_JMICRON_JMC260:
1827 if (((jme->chip_main_rev == 5) &&
1828 ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
1829 (jme->chip_sub_rev == 3))) ||
1830 (jme->chip_main_rev >= 6)) {
1831 phy_comm0 = 0x008A;
1832 phy_comm1 = 0x4109;
1833 }
1834 if ((jme->chip_main_rev == 3) &&
1835 ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
1836 phy_comm0 = 0xE088;
1837 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
1838 phy_comm0 = 0x608A;
1839 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
1840 phy_comm0 = 0x408A;
1841 break;
1842 default:
1843 return -ENODEV;
1844 }
1845 if (phy_comm0)
1846 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
1847 if (phy_comm1)
1848 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
1849
1850 return 0;
1851}
1852
1853static int
1748jme_open(struct net_device *netdev) 1854jme_open(struct net_device *netdev)
1749{ 1855{
1750 struct jme_adapter *jme = netdev_priv(netdev); 1856 struct jme_adapter *jme = netdev_priv(netdev);
@@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev)
1769 jme_set_settings(netdev, &jme->old_ecmd); 1875 jme_set_settings(netdev, &jme->old_ecmd);
1770 else 1876 else
1771 jme_reset_phy_processor(jme); 1877 jme_reset_phy_processor(jme);
1772 1878 jme_phy_calibration(jme);
1879 jme_phy_setEA(jme);
1773 jme_reset_link(jme); 1880 jme_reset_link(jme);
1774 1881
1775 return 0; 1882 return 0;
@@ -3184,7 +3291,8 @@ jme_resume(struct device *dev)
3184 jme_set_settings(netdev, &jme->old_ecmd); 3291 jme_set_settings(netdev, &jme->old_ecmd);
3185 else 3292 else
3186 jme_reset_phy_processor(jme); 3293 jme_reset_phy_processor(jme);
3187 3294 jme_phy_calibration(jme);
3295 jme_phy_setEA(jme);
3188 jme_start_irq(jme); 3296 jme_start_irq(jme);
3189 netif_device_attach(netdev); 3297 netif_device_attach(netdev);
3190 3298
@@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
3239MODULE_LICENSE("GPL"); 3347MODULE_LICENSE("GPL");
3240MODULE_VERSION(DRV_VERSION); 3348MODULE_VERSION(DRV_VERSION);
3241MODULE_DEVICE_TABLE(pci, jme_pci_tbl); 3349MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
3242
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 02ea27c1dcb5..4304072bd3c5 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -760,6 +760,25 @@ enum jme_rxmcs_bits {
760 RXMCS_CHECKSUM, 760 RXMCS_CHECKSUM,
761}; 761};
762 762
763/* Extern PHY common register 2 */
764
765#define PHY_GAD_TEST_MODE_1 0x00002000
766#define PHY_GAD_TEST_MODE_MSK 0x0000E000
767#define JM_PHY_SPEC_REG_READ 0x00004000
768#define JM_PHY_SPEC_REG_WRITE 0x00008000
769#define PHY_CALIBRATION_DELAY 20
770#define JM_PHY_SPEC_ADDR_REG 0x1E
771#define JM_PHY_SPEC_DATA_REG 0x1F
772
773#define JM_PHY_EXT_COMM_0_REG 0x30
774#define JM_PHY_EXT_COMM_1_REG 0x31
775#define JM_PHY_EXT_COMM_2_REG 0x32
776#define JM_PHY_EXT_COMM_2_CALI_ENABLE 0x01
777#define JM_PHY_EXT_COMM_2_CALI_MODE_0 0x02
778#define JM_PHY_EXT_COMM_2_CALI_LATCH 0x10
779#define PCI_PRIV_SHARE_NICCTRL 0xF5
780#define JME_FLAG_PHYEA_ENABLE 0x2
781
763/* 782/*
764 * Wakeup Frame setup interface registers 783 * Wakeup Frame setup interface registers
765 */ 784 */
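
The jme additions above access vendor-specific PHY registers indirectly: an address register (JM_PHY_SPEC_ADDR_REG, 0x1E) selects the target with a read or write opcode OR'd in, and a data register (JM_PHY_SPEC_DATA_REG, 0x1F) carries the value. A minimal standalone model of that address/data window over a mock register file; the register widths and the emulation below are illustrative, not the JMicron hardware:

#include <stdio.h>
#include <stdint.h>

#define SPEC_ADDR_REG 0x1E   /* selects which extended register to touch */
#define SPEC_DATA_REG 0x1F   /* holds the value read or to be written */
#define OP_READ  0x4000
#define OP_WRITE 0x8000

static uint16_t mdio[0x20];      /* directly addressable MDIO registers */
static uint16_t ext_regs[0x40];  /* extended registers behind the window */

/* Emulate the device: it acts when the address register is written. */
static void mdio_write(int reg, uint16_t val)
{
        mdio[reg] = val;
        if (reg == SPEC_ADDR_REG) {
                int target = val & 0x3f;
                if (val & OP_WRITE)
                        ext_regs[target] = mdio[SPEC_DATA_REG];
                else if (val & OP_READ)
                        mdio[SPEC_DATA_REG] = ext_regs[target];
        }
}

static uint16_t mdio_read(int reg) { return mdio[reg]; }

/* Driver-side helpers mirroring jme_phy_specreg_{read,write}(). */
static void specreg_write(int ext, uint16_t val)
{
        mdio_write(SPEC_DATA_REG, val);            /* stage the data first */
        mdio_write(SPEC_ADDR_REG, OP_WRITE | ext);
}

static uint16_t specreg_read(int ext)
{
        mdio_write(SPEC_ADDR_REG, OP_READ | ext);
        return mdio_read(SPEC_DATA_REG);
}

int main(void)
{
        specreg_write(0x32, 0x0013);
        printf("ext reg 0x32 = %#x\n", specreg_read(0x32));
        return 0;
}
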
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6bb2b9506cad..0b3567ab8121 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -34,6 +34,8 @@
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/dma-mapping.h>
38#include <linux/module.h>
37 39
38#include <asm/checksum.h> 40#include <asm/checksum.h>
39 41
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index fdc6c394c683..7803efa46eb2 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -50,7 +50,7 @@
50#include "sky2.h" 50#include "sky2.h"
51 51
52#define DRV_NAME "sky2" 52#define DRV_NAME "sky2"
53#define DRV_VERSION "1.29" 53#define DRV_VERSION "1.30"
54 54
55/* 55/*
56 * The Yukon II chipset takes 64 bit command blocks (called list elements) 56 * The Yukon II chipset takes 64 bit command blocks (called list elements)
@@ -68,7 +68,7 @@
68#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1)) 68#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
69#define TX_MIN_PENDING (MAX_SKB_TX_LE+1) 69#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
70#define TX_MAX_PENDING 1024 70#define TX_MAX_PENDING 1024
71#define TX_DEF_PENDING 127 71#define TX_DEF_PENDING 63
72 72
73#define TX_WATCHDOG (5 * HZ) 73#define TX_WATCHDOG (5 * HZ)
74#define NAPI_WEIGHT 64 74#define NAPI_WEIGHT 64
@@ -869,6 +869,7 @@ static void sky2_wol_init(struct sky2_port *sky2)
869 869
870 /* block receiver */ 870 /* block receiver */
871 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 871 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
872 sky2_read32(hw, B0_CTST);
872} 873}
873 874
874static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) 875static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
@@ -1274,6 +1275,14 @@ static void rx_set_checksum(struct sky2_port *sky2)
1274 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 1275 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
1275} 1276}
1276 1277
1278/*
1279 * Fixed initial key as seed to RSS.
1280 */
1281static const uint32_t rss_init_key[10] = {
1282 0x7c3351da, 0x51c5cf4e, 0x44adbdd1, 0xe8d38d18, 0x48897c43,
1283 0xb1d60e7e, 0x6a3dd760, 0x01a2e453, 0x16f46f13, 0x1a0e7b30
1284};
1285
1277/* Enable/disable receive hash calculation (RSS) */ 1286/* Enable/disable receive hash calculation (RSS) */
1278static void rx_set_rss(struct net_device *dev, u32 features) 1287static void rx_set_rss(struct net_device *dev, u32 features)
1279{ 1288{
@@ -1289,12 +1298,9 @@ static void rx_set_rss(struct net_device *dev, u32 features)
1289 1298
1290 /* Program RSS initial values */ 1299 /* Program RSS initial values */
1291 if (features & NETIF_F_RXHASH) { 1300 if (features & NETIF_F_RXHASH) {
1292 u32 key[nkeys];
1293
1294 get_random_bytes(key, nkeys * sizeof(u32));
1295 for (i = 0; i < nkeys; i++) 1301 for (i = 0; i < nkeys; i++)
1296 sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4), 1302 sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
1297 key[i]); 1303 rss_init_key[i]);
1298 1304
1299 /* Need to turn on (undocumented) flag to make hashing work */ 1305 /* Need to turn on (undocumented) flag to make hashing work */
1300 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), 1306 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
@@ -1717,6 +1723,8 @@ static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
1717 if (err) 1723 if (err)
1718 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); 1724 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
1719 else { 1725 else {
1726 hw->flags |= SKY2_HW_IRQ_SETUP;
1727
1720 napi_enable(&hw->napi); 1728 napi_enable(&hw->napi);
1721 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 1729 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
1722 sky2_read32(hw, B0_IMSK); 1730 sky2_read32(hw, B0_IMSK);
@@ -1727,7 +1735,7 @@ static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
1727 1735
1728 1736
1729/* Bring up network interface. */ 1737/* Bring up network interface. */
1730static int sky2_up(struct net_device *dev) 1738static int sky2_open(struct net_device *dev)
1731{ 1739{
1732 struct sky2_port *sky2 = netdev_priv(dev); 1740 struct sky2_port *sky2 = netdev_priv(dev);
1733 struct sky2_hw *hw = sky2->hw; 1741 struct sky2_hw *hw = sky2->hw;
@@ -1747,6 +1755,11 @@ static int sky2_up(struct net_device *dev)
1747 1755
1748 sky2_hw_up(sky2); 1756 sky2_hw_up(sky2);
1749 1757
1758 if (hw->chip_id == CHIP_ID_YUKON_OPT ||
1759 hw->chip_id == CHIP_ID_YUKON_PRM ||
1760 hw->chip_id == CHIP_ID_YUKON_OP_2)
1761 imask |= Y2_IS_PHY_QLNK; /* enable PHY Quick Link */
1762
1750 /* Enable interrupts from phy/mac for port */ 1763 /* Enable interrupts from phy/mac for port */
1751 imask = sky2_read32(hw, B0_IMSK); 1764 imask = sky2_read32(hw, B0_IMSK);
1752 imask |= portirq_msk[port]; 1765 imask |= portirq_msk[port];
@@ -2040,6 +2053,8 @@ static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
2040 2053
2041 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); 2054 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
2042 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 2055 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
2056
2057 sky2_read32(hw, B0_CTST);
2043} 2058}
2044 2059
2045static void sky2_hw_down(struct sky2_port *sky2) 2060static void sky2_hw_down(struct sky2_port *sky2)
@@ -2090,7 +2105,7 @@ static void sky2_hw_down(struct sky2_port *sky2)
2090} 2105}
2091 2106
2092/* Network shutdown */ 2107/* Network shutdown */
2093static int sky2_down(struct net_device *dev) 2108static int sky2_close(struct net_device *dev)
2094{ 2109{
2095 struct sky2_port *sky2 = netdev_priv(dev); 2110 struct sky2_port *sky2 = netdev_priv(dev);
2096 struct sky2_hw *hw = sky2->hw; 2111 struct sky2_hw *hw = sky2->hw;
@@ -2101,15 +2116,22 @@ static int sky2_down(struct net_device *dev)
2101 2116
2102 netif_info(sky2, ifdown, dev, "disabling interface\n"); 2117 netif_info(sky2, ifdown, dev, "disabling interface\n");
2103 2118
2104 /* Disable port IRQ */
2105 sky2_write32(hw, B0_IMSK,
2106 sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
2107 sky2_read32(hw, B0_IMSK);
2108
2109 if (hw->ports == 1) { 2119 if (hw->ports == 1) {
2120 sky2_write32(hw, B0_IMSK, 0);
2121 sky2_read32(hw, B0_IMSK);
2122
2110 napi_disable(&hw->napi); 2123 napi_disable(&hw->napi);
2111 free_irq(hw->pdev->irq, hw); 2124 free_irq(hw->pdev->irq, hw);
2125 hw->flags &= ~SKY2_HW_IRQ_SETUP;
2112 } else { 2126 } else {
2127 u32 imask;
2128
2129 /* Disable port IRQ */
2130 imask = sky2_read32(hw, B0_IMSK);
2131 imask &= ~portirq_msk[sky2->port];
2132 sky2_write32(hw, B0_IMSK, imask);
2133 sky2_read32(hw, B0_IMSK);
2134
2113 synchronize_irq(hw->pdev->irq); 2135 synchronize_irq(hw->pdev->irq);
2114 napi_synchronize(&hw->napi); 2136 napi_synchronize(&hw->napi);
2115 } 2137 }
@@ -2587,7 +2609,7 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
2587 if (netif_running(dev)) { 2609 if (netif_running(dev)) {
2588 sky2_tx_complete(sky2, last); 2610 sky2_tx_complete(sky2, last);
2589 2611
2590 /* Wake unless it's detached, and called e.g. from sky2_down() */ 2612 /* Wake unless it's detached, and called e.g. from sky2_close() */
2591 if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) 2613 if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
2592 netif_wake_queue(dev); 2614 netif_wake_queue(dev);
2593 } 2615 }
@@ -3258,7 +3280,6 @@ static void sky2_reset(struct sky2_hw *hw)
3258 hw->chip_id == CHIP_ID_YUKON_PRM || 3280 hw->chip_id == CHIP_ID_YUKON_PRM ||
3259 hw->chip_id == CHIP_ID_YUKON_OP_2) { 3281 hw->chip_id == CHIP_ID_YUKON_OP_2) {
3260 u16 reg; 3282 u16 reg;
3261 u32 msk;
3262 3283
3263 if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { 3284 if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
3264 /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */ 3285 /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */
@@ -3281,11 +3302,6 @@ static void sky2_reset(struct sky2_hw *hw)
3281 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3302 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3282 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); 3303 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
3283 3304
3284 /* enable PHY Quick Link */
3285 msk = sky2_read32(hw, B0_IMSK);
3286 msk |= Y2_IS_PHY_QLNK;
3287 sky2_write32(hw, B0_IMSK, msk);
3288
3289 /* check if PSMv2 was running before */ 3305 /* check if PSMv2 was running before */
3290 reg = sky2_pci_read16(hw, PSM_CONFIG_REG3); 3306 reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
3291 if (reg & PCI_EXP_LNKCTL_ASPMC) 3307 if (reg & PCI_EXP_LNKCTL_ASPMC)
@@ -3383,7 +3399,7 @@ static void sky2_detach(struct net_device *dev)
3383 netif_tx_lock(dev); 3399 netif_tx_lock(dev);
3384 netif_device_detach(dev); /* stop txq */ 3400 netif_device_detach(dev); /* stop txq */
3385 netif_tx_unlock(dev); 3401 netif_tx_unlock(dev);
3386 sky2_down(dev); 3402 sky2_close(dev);
3387 } 3403 }
3388} 3404}
3389 3405
@@ -3393,7 +3409,7 @@ static int sky2_reattach(struct net_device *dev)
3393 int err = 0; 3409 int err = 0;
3394 3410
3395 if (netif_running(dev)) { 3411 if (netif_running(dev)) {
3396 err = sky2_up(dev); 3412 err = sky2_open(dev);
3397 if (err) { 3413 if (err) {
3398 netdev_info(dev, "could not restart %d\n", err); 3414 netdev_info(dev, "could not restart %d\n", err);
3399 dev_close(dev); 3415 dev_close(dev);
@@ -3410,10 +3426,13 @@ static void sky2_all_down(struct sky2_hw *hw)
3410{ 3426{
3411 int i; 3427 int i;
3412 3428
3413 sky2_read32(hw, B0_IMSK); 3429 if (hw->flags & SKY2_HW_IRQ_SETUP) {
3414 sky2_write32(hw, B0_IMSK, 0); 3430 sky2_read32(hw, B0_IMSK);
3415 synchronize_irq(hw->pdev->irq); 3431 sky2_write32(hw, B0_IMSK, 0);
3416 napi_disable(&hw->napi); 3432
3433 synchronize_irq(hw->pdev->irq);
3434 napi_disable(&hw->napi);
3435 }
3417 3436
3418 for (i = 0; i < hw->ports; i++) { 3437 for (i = 0; i < hw->ports; i++) {
3419 struct net_device *dev = hw->dev[i]; 3438 struct net_device *dev = hw->dev[i];
@@ -3446,11 +3465,12 @@ static void sky2_all_up(struct sky2_hw *hw)
3446 netif_wake_queue(dev); 3465 netif_wake_queue(dev);
3447 } 3466 }
3448 3467
3449 sky2_write32(hw, B0_IMSK, imask); 3468 if (hw->flags & SKY2_HW_IRQ_SETUP) {
3450 sky2_read32(hw, B0_IMSK); 3469 sky2_write32(hw, B0_IMSK, imask);
3451 3470 sky2_read32(hw, B0_IMSK);
3452 sky2_read32(hw, B0_Y2_SP_LISR); 3471 sky2_read32(hw, B0_Y2_SP_LISR);
3453 napi_enable(&hw->napi); 3472 napi_enable(&hw->napi);
3473 }
3454} 3474}
3455 3475
3456static void sky2_restart(struct work_struct *work) 3476static void sky2_restart(struct work_struct *work)
@@ -4071,6 +4091,16 @@ static int sky2_set_coalesce(struct net_device *dev,
4071 return 0; 4091 return 0;
4072} 4092}
4073 4093
4094/*
4095 * Hardware is limited to min of 128 and max of 2048 for ring size
4096 * and rounded up to next power of two
4097 * to avoid division in modulus calculation
4098 */
4099static unsigned long roundup_ring_size(unsigned long pending)
4100{
4101 return max(128ul, roundup_pow_of_two(pending+1));
4102}
4103
4074static void sky2_get_ringparam(struct net_device *dev, 4104static void sky2_get_ringparam(struct net_device *dev,
4075 struct ethtool_ringparam *ering) 4105 struct ethtool_ringparam *ering)
4076{ 4106{
@@ -4098,7 +4128,7 @@ static int sky2_set_ringparam(struct net_device *dev,
4098 4128
4099 sky2->rx_pending = ering->rx_pending; 4129 sky2->rx_pending = ering->rx_pending;
4100 sky2->tx_pending = ering->tx_pending; 4130 sky2->tx_pending = ering->tx_pending;
4101 sky2->tx_ring_size = roundup_pow_of_two(sky2->tx_pending+1); 4131 sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending);
4102 4132
4103 return sky2_reattach(dev); 4133 return sky2_reattach(dev);
4104} 4134}
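
roundup_ring_size() above centralises the constraint that the transmit ring size must be a power of two (so ring indices can be masked rather than taken modulo) and never smaller than 128 entries. A standalone sketch of the same rounding, with a plain loop standing in for the kernel's roundup_pow_of_two():

#include <stdio.h>

/* Round up to the next power of two (plain C stand-in for the kernel helper). */
static unsigned long next_pow2(unsigned long v)
{
        unsigned long p = 1;
        while (p < v)
                p <<= 1;
        return p;
}

/* Ring must be a power of two and at least 128 entries. */
static unsigned long roundup_ring_size(unsigned long pending)
{
        unsigned long size = next_pow2(pending + 1);
        return size < 128 ? 128 : size;
}

int main(void)
{
        unsigned long pendings[] = { 63, 127, 128, 1000 };
        for (unsigned i = 0; i < sizeof(pendings) / sizeof(pendings[0]); i++)
                printf("pending=%lu -> ring=%lu\n",
                       pendings[i], roundup_ring_size(pendings[i]));
        return 0;
}
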
@@ -4556,7 +4586,7 @@ static int sky2_device_event(struct notifier_block *unused,
4556 struct net_device *dev = ptr; 4586 struct net_device *dev = ptr;
4557 struct sky2_port *sky2 = netdev_priv(dev); 4587 struct sky2_port *sky2 = netdev_priv(dev);
4558 4588
4559 if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug) 4589 if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug)
4560 return NOTIFY_DONE; 4590 return NOTIFY_DONE;
4561 4591
4562 switch (event) { 4592 switch (event) {
@@ -4621,8 +4651,8 @@ static __exit void sky2_debug_cleanup(void)
4621 not allowing netpoll on second port */ 4651 not allowing netpoll on second port */
4622static const struct net_device_ops sky2_netdev_ops[2] = { 4652static const struct net_device_ops sky2_netdev_ops[2] = {
4623 { 4653 {
4624 .ndo_open = sky2_up, 4654 .ndo_open = sky2_open,
4625 .ndo_stop = sky2_down, 4655 .ndo_stop = sky2_close,
4626 .ndo_start_xmit = sky2_xmit_frame, 4656 .ndo_start_xmit = sky2_xmit_frame,
4627 .ndo_do_ioctl = sky2_ioctl, 4657 .ndo_do_ioctl = sky2_ioctl,
4628 .ndo_validate_addr = eth_validate_addr, 4658 .ndo_validate_addr = eth_validate_addr,
@@ -4638,8 +4668,8 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
4638#endif 4668#endif
4639 }, 4669 },
4640 { 4670 {
4641 .ndo_open = sky2_up, 4671 .ndo_open = sky2_open,
4642 .ndo_stop = sky2_down, 4672 .ndo_stop = sky2_close,
4643 .ndo_start_xmit = sky2_xmit_frame, 4673 .ndo_start_xmit = sky2_xmit_frame,
4644 .ndo_do_ioctl = sky2_ioctl, 4674 .ndo_do_ioctl = sky2_ioctl,
4645 .ndo_validate_addr = eth_validate_addr, 4675 .ndo_validate_addr = eth_validate_addr,
@@ -4692,7 +4722,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4692 spin_lock_init(&sky2->phy_lock); 4722 spin_lock_init(&sky2->phy_lock);
4693 4723
4694 sky2->tx_pending = TX_DEF_PENDING; 4724 sky2->tx_pending = TX_DEF_PENDING;
4695 sky2->tx_ring_size = roundup_pow_of_two(TX_DEF_PENDING+1); 4725 sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING);
4696 sky2->rx_pending = RX_DEF_PENDING; 4726 sky2->rx_pending = RX_DEF_PENDING;
4697 4727
4698 hw->dev[port] = dev; 4728 hw->dev[port] = dev;
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 0af31b8b5f10..ff6f58bf822a 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2287,6 +2287,7 @@ struct sky2_hw {
2287#define SKY2_HW_RSS_BROKEN 0x00000100 2287#define SKY2_HW_RSS_BROKEN 0x00000100
2288#define SKY2_HW_VLAN_BROKEN 0x00000200 2288#define SKY2_HW_VLAN_BROKEN 0x00000200
2289#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */ 2289#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */
2290#define SKY2_HW_IRQ_SETUP 0x00000800
2290 2291
2291 u8 chip_id; 2292 u8 chip_id;
2292 u8 chip_rev; 2293 u8 chip_rev;
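
Several sky2 hunks above introduce the SKY2_HW_IRQ_SETUP flag so that sky2_all_down()/sky2_all_up() only touch the shared interrupt mask and NAPI context when sky2_setup_irq() has actually run, e.g. when the restart worker fires before the first port was ever opened. A tiny standalone model of flag-guarded setup and teardown of a shared resource; the names and the freed "resource" are invented for the example:

#include <stdio.h>
#include <stdlib.h>

#define HW_IRQ_SETUP 0x1

struct fake_hw {
        unsigned int flags;
        int *shared_irq;   /* stands in for the requested IRQ + NAPI context */
};

static void setup_shared(struct fake_hw *hw)
{
        hw->shared_irq = malloc(sizeof(*hw->shared_irq));
        hw->flags |= HW_IRQ_SETUP;
        printf("shared resources set up\n");
}

/* Restart-style teardown: only touch the shared state if it exists. */
static void all_down(struct fake_hw *hw)
{
        if (hw->flags & HW_IRQ_SETUP) {
                free(hw->shared_irq);
                hw->shared_irq = NULL;
                hw->flags &= ~HW_IRQ_SETUP;
                printf("shared resources released\n");
        } else {
                printf("nothing to release\n");
        }
}

int main(void)
{
        struct fake_hw hw = { 0, NULL };

        all_down(&hw);       /* restart before any port was opened: no-op */
        setup_shared(&hw);
        all_down(&hw);       /* normal path: releases the shared state */
        return 0;
}
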
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b89c36dbf5b3..c2df6c358603 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -581,6 +581,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
581 * Packet is OK - process it. 581 * Packet is OK - process it.
582 */ 582 */
583 length = be32_to_cpu(cqe->byte_cnt); 583 length = be32_to_cpu(cqe->byte_cnt);
584 length -= ring->fcs_del;
584 ring->bytes += length; 585 ring->bytes += length;
585 ring->packets++; 586 ring->packets++;
586 587
@@ -813,8 +814,11 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
813 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); 814 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
814 815
815 /* Cancel FCS removal if FW allows */ 816 /* Cancel FCS removal if FW allows */
816 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) 817 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
817 context->param3 |= cpu_to_be32(1 << 29); 818 context->param3 |= cpu_to_be32(1 << 29);
819 ring->fcs_del = ETH_FCS_LEN;
820 } else
821 ring->fcs_del = 0;
818 822
819 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state); 823 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
820 if (err) { 824 if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8fda331c65df..207b5add3ca8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -272,6 +272,7 @@ struct mlx4_en_rx_ring {
272 u32 prod; 272 u32 prod;
273 u32 cons; 273 u32 cons;
274 u32 buf_size; 274 u32 buf_size;
275 u8 fcs_del;
275 void *buf; 276 void *buf;
276 void *rx_info; 277 void *rx_info;
277 unsigned long bytes; 278 unsigned long bytes;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1dca57013cb2..1c61d36e6570 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -609,7 +609,7 @@ struct nv_ethtool_str {
609}; 609};
610 610
611static const struct nv_ethtool_str nv_estats_str[] = { 611static const struct nv_ethtool_str nv_estats_str[] = {
612 { "tx_bytes" }, 612 { "tx_bytes" }, /* includes Ethernet FCS CRC */
613 { "tx_zero_rexmt" }, 613 { "tx_zero_rexmt" },
614 { "tx_one_rexmt" }, 614 { "tx_one_rexmt" },
615 { "tx_many_rexmt" }, 615 { "tx_many_rexmt" },
@@ -637,7 +637,7 @@ static const struct nv_ethtool_str nv_estats_str[] = {
637 /* version 2 stats */ 637 /* version 2 stats */
638 { "tx_deferral" }, 638 { "tx_deferral" },
639 { "tx_packets" }, 639 { "tx_packets" },
640 { "rx_bytes" }, 640 { "rx_bytes" }, /* includes Ethernet FCS CRC */
641 { "tx_pause" }, 641 { "tx_pause" },
642 { "rx_pause" }, 642 { "rx_pause" },
643 { "rx_drop_frame" }, 643 { "rx_drop_frame" },
@@ -649,7 +649,7 @@ static const struct nv_ethtool_str nv_estats_str[] = {
649}; 649};
650 650
651struct nv_ethtool_stats { 651struct nv_ethtool_stats {
652 u64 tx_bytes; 652 u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
653 u64 tx_zero_rexmt; 653 u64 tx_zero_rexmt;
654 u64 tx_one_rexmt; 654 u64 tx_one_rexmt;
655 u64 tx_many_rexmt; 655 u64 tx_many_rexmt;
@@ -670,14 +670,14 @@ struct nv_ethtool_stats {
670 u64 rx_unicast; 670 u64 rx_unicast;
671 u64 rx_multicast; 671 u64 rx_multicast;
672 u64 rx_broadcast; 672 u64 rx_broadcast;
673 u64 rx_packets; 673 u64 rx_packets; /* should be ifconfig->rx_packets */
674 u64 rx_errors_total; 674 u64 rx_errors_total;
675 u64 tx_errors_total; 675 u64 tx_errors_total;
676 676
677 /* version 2 stats */ 677 /* version 2 stats */
678 u64 tx_deferral; 678 u64 tx_deferral;
679 u64 tx_packets; 679 u64 tx_packets; /* should be ifconfig->tx_packets */
680 u64 rx_bytes; 680 u64 rx_bytes; /* should be ifconfig->rx_bytes + 4*rx_packets */
681 u64 tx_pause; 681 u64 tx_pause;
682 u64 rx_pause; 682 u64 rx_pause;
683 u64 rx_drop_frame; 683 u64 rx_drop_frame;
@@ -1706,10 +1706,17 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1706 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) { 1706 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
1707 nv_get_hw_stats(dev); 1707 nv_get_hw_stats(dev);
1708 1708
1709 /*
1710 * Note: because HW stats are not always available and
1711 * for consistency reasons, the following ifconfig
1712 * stats are managed by software: rx_bytes, tx_bytes,
1713 * rx_packets and tx_packets. The related hardware
1714 * stats reported by ethtool should be equivalent to
1715 * these ifconfig stats, with 4 additional bytes per
1716 * packet (Ethernet FCS CRC).
1717 */
1718
1709 /* copy to net_device stats */ 1719 /* copy to net_device stats */
1710 dev->stats.tx_packets = np->estats.tx_packets;
1711 dev->stats.rx_bytes = np->estats.rx_bytes;
1712 dev->stats.tx_bytes = np->estats.tx_bytes;
1713 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; 1720 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
1714 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; 1721 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
1715 dev->stats.rx_crc_errors = np->estats.rx_crc_errors; 1722 dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
@@ -2380,6 +2387,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
2380 if (flags & NV_TX_ERROR) { 2387 if (flags & NV_TX_ERROR) {
2381 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) 2388 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2382 nv_legacybackoff_reseed(dev); 2389 nv_legacybackoff_reseed(dev);
2390 } else {
2391 dev->stats.tx_packets++;
2392 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2383 } 2393 }
2384 dev_kfree_skb_any(np->get_tx_ctx->skb); 2394 dev_kfree_skb_any(np->get_tx_ctx->skb);
2385 np->get_tx_ctx->skb = NULL; 2395 np->get_tx_ctx->skb = NULL;
@@ -2390,6 +2400,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
2390 if (flags & NV_TX2_ERROR) { 2400 if (flags & NV_TX2_ERROR) {
2391 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2401 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2392 nv_legacybackoff_reseed(dev); 2402 nv_legacybackoff_reseed(dev);
2403 } else {
2404 dev->stats.tx_packets++;
2405 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2393 } 2406 }
2394 dev_kfree_skb_any(np->get_tx_ctx->skb); 2407 dev_kfree_skb_any(np->get_tx_ctx->skb);
2395 np->get_tx_ctx->skb = NULL; 2408 np->get_tx_ctx->skb = NULL;
@@ -2429,6 +2442,9 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2429 else 2442 else
2430 nv_legacybackoff_reseed(dev); 2443 nv_legacybackoff_reseed(dev);
2431 } 2444 }
2445 } else {
2446 dev->stats.tx_packets++;
2447 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2432 } 2448 }
2433 2449
2434 dev_kfree_skb_any(np->get_tx_ctx->skb); 2450 dev_kfree_skb_any(np->get_tx_ctx->skb);
@@ -2678,6 +2694,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2678 skb->protocol = eth_type_trans(skb, dev); 2694 skb->protocol = eth_type_trans(skb, dev);
2679 napi_gro_receive(&np->napi, skb); 2695 napi_gro_receive(&np->napi, skb);
2680 dev->stats.rx_packets++; 2696 dev->stats.rx_packets++;
2697 dev->stats.rx_bytes += len;
2681next_pkt: 2698next_pkt:
2682 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2699 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2683 np->get_rx.orig = np->first_rx.orig; 2700 np->get_rx.orig = np->first_rx.orig;
@@ -2761,6 +2778,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2761 } 2778 }
2762 napi_gro_receive(&np->napi, skb); 2779 napi_gro_receive(&np->napi, skb);
2763 dev->stats.rx_packets++; 2780 dev->stats.rx_packets++;
2781 dev->stats.rx_bytes += len;
2764 } else { 2782 } else {
2765 dev_kfree_skb(skb); 2783 dev_kfree_skb(skb);
2766 } 2784 }
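
The comment added to nv_get_stats() above states the invariant this forcedeth patch relies on: rx/tx bytes and packets are now maintained in software from skb lengths, while the corresponding hardware counters include the 4-byte Ethernet FCS on every frame. A quick standalone check of that relationship (hypothetical numbers, plain C):

    #include <assert.h>
    #include <stdio.h>

    #define ETH_FCS_LEN 4

    int main(void)
    {
        /* software-maintained (ifconfig-style) counters */
        unsigned long long sw_tx_packets = 1000, sw_tx_bytes = 600000;

        /* what the hardware/ethtool counter is expected to report */
        unsigned long long hw_tx_bytes =
            sw_tx_bytes + (unsigned long long)ETH_FCS_LEN * sw_tx_packets;

        assert(hw_tx_bytes == 604000ULL);
        printf("hw tx_bytes = sw tx_bytes + 4 * tx_packets = %llu\n",
               hw_tx_bytes);
        return 0;
    }
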
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 9c075ea2682e..9cb5f912e489 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -18,8 +18,8 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/module.h> /* for __MODULE_STRING */
22#include "pch_gbe.h" 21#include "pch_gbe.h"
22#include <linux/module.h> /* for __MODULE_STRING */
23 23
24#define OPTION_UNSET -1 24#define OPTION_UNSET -1
25#define OPTION_DISABLED 0 25#define OPTION_DISABLED 0
diff --git a/drivers/net/ethernet/pasemi/Makefile b/drivers/net/ethernet/pasemi/Makefile
index 05db5434bafc..90497ffb1ac3 100644
--- a/drivers/net/ethernet/pasemi/Makefile
+++ b/drivers/net/ethernet/pasemi/Makefile
@@ -2,4 +2,5 @@
2# Makefile for the A Semi network device drivers. 2# Makefile for the A Semi network device drivers.
3# 3#
4 4
5obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o 5obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
6pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 8731f79c9efc..b8478aab050e 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -58,10 +58,8 @@
58 58
59 59
60#define TX_DESC_PER_IOCB 8 60#define TX_DESC_PER_IOCB 8
61/* The maximum number of frags we handle is based 61
62 * on PAGE_SIZE... 62#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
63 */
64#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
65#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) 63#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
66#else /* all other page sizes */ 64#else /* all other page sizes */
67#define TX_DESC_PER_OAL 0 65#define TX_DESC_PER_OAL 0
@@ -1353,7 +1351,7 @@ struct tx_ring_desc {
1353 struct ob_mac_iocb_req *queue_entry; 1351 struct ob_mac_iocb_req *queue_entry;
1354 u32 index; 1352 u32 index;
1355 struct oal oal; 1353 struct oal oal;
1356 struct map_list map[MAX_SKB_FRAGS + 1]; 1354 struct map_list map[MAX_SKB_FRAGS + 2];
1357 int map_cnt; 1355 int map_cnt;
1358 struct tx_ring_desc *next; 1356 struct tx_ring_desc *next;
1359}; 1357};
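
The qlge change above sizes TX_DESC_PER_OAL directly from MAX_SKB_FRAGS instead of guessing from PAGE_SHIFT, and grows map[] to MAX_SKB_FRAGS + 2, presumably to cover the linear buffer and the OAL mapping in addition to the fragments. A toy evaluation of that sizing rule (17 stands in for a typical MAX_SKB_FRAGS with 4 KiB pages; it is an example value, not taken from the driver):

    #include <stdio.h>

    #define EXAMPLE_MAX_SKB_FRAGS 17
    #define TX_DESC_PER_IOCB      8

    /* Descriptors that do not fit in the IOCB spill into the OAL. */
    #if ((EXAMPLE_MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
    #define TX_DESC_PER_OAL ((EXAMPLE_MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
    #else
    #define TX_DESC_PER_OAL 0
    #endif

    int main(void)
    {
        printf("descriptors spilling into the OAL: %d\n", TX_DESC_PER_OAL);
        printf("map[] entries needed: %d\n", EXAMPLE_MAX_SKB_FRAGS + 2);
        return 0;
    }
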
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 1fc01ca72b46..4bf68cfef390 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -940,7 +940,7 @@ static void r6040_multicast_list(struct net_device *dev)
940 iowrite16(lp->mcr0, ioaddr + MCR0); 940 iowrite16(lp->mcr0, ioaddr + MCR0);
941 941
942 /* Fill the MAC hash tables with their values */ 942 /* Fill the MAC hash tables with their values */
943 if (lp->mcr0 && MCR0_HASH_EN) { 943 if (lp->mcr0 & MCR0_HASH_EN) {
944 iowrite16(hash_table[0], ioaddr + MAR0); 944 iowrite16(hash_table[0], ioaddr + MAR0);
945 iowrite16(hash_table[1], ioaddr + MAR1); 945 iowrite16(hash_table[1], ioaddr + MAR1);
946 iowrite16(hash_table[2], ioaddr + MAR2); 946 iowrite16(hash_table[2], ioaddr + MAR2);
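
The r6040 fix above is a one-character change: lp->mcr0 && MCR0_HASH_EN is a logical AND, which is true whenever mcr0 is non-zero, so the hash-table registers were written even with hashing disabled; the intended test is the bitwise &. A tiny demonstration (the bit value of MCR0_HASH_EN below is hypothetical):

    #include <stdio.h>

    #define MCR0_HASH_EN 0x0100   /* illustrative bit value */

    int main(void)
    {
        unsigned short mcr0 = 0x0002;          /* hashing NOT enabled */

        if (mcr0 && MCR0_HASH_EN)              /* buggy: true for any non-zero mcr0 */
            printf("logical &&: would program hash table (wrong)\n");

        if (mcr0 & MCR0_HASH_EN)               /* correct bit test */
            printf("bitwise &: hash table programmed\n");
        else
            printf("bitwise &: hashing disabled, skip\n");

        return 0;
    }

Some compilers can flag the && form when one operand is a constant, but the fix itself is simply using the bitwise operator.
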
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 92b45f08858f..67bf07819992 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1183,11 +1183,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
1183 return value; 1183 return value;
1184} 1184}
1185 1185
1186static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) 1186static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1187{ 1187{
1188 RTL_W16(IntrMask, 0x0000); 1188 void __iomem *ioaddr = tp->mmio_addr;
1189 1189
1190 RTL_W16(IntrStatus, 0xffff); 1190 RTL_W16(IntrMask, 0x0000);
1191 RTL_W16(IntrStatus, tp->intr_event);
1192 RTL_R8(ChipCmd);
1191} 1193}
1192 1194
1193static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) 1195static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
@@ -1292,7 +1294,7 @@ static void __rtl8169_check_link_status(struct net_device *dev,
1292 netif_carrier_off(dev); 1294 netif_carrier_off(dev);
1293 netif_info(tp, ifdown, dev, "link down\n"); 1295 netif_info(tp, ifdown, dev, "link down\n");
1294 if (pm) 1296 if (pm)
1295 pm_schedule_suspend(&tp->pci_dev->dev, 100); 1297 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
1296 } 1298 }
1297 spin_unlock_irqrestore(&tp->lock, flags); 1299 spin_unlock_irqrestore(&tp->lock, flags);
1298} 1300}
@@ -3933,8 +3935,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
3933 break; 3935 break;
3934 udelay(100); 3936 udelay(100);
3935 } 3937 }
3936
3937 rtl8169_init_ring_indexes(tp);
3938} 3938}
3939 3939
3940static int __devinit 3940static int __devinit
@@ -4339,7 +4339,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
4339 void __iomem *ioaddr = tp->mmio_addr; 4339 void __iomem *ioaddr = tp->mmio_addr;
4340 4340
4341 /* Disable interrupts */ 4341 /* Disable interrupts */
4342 rtl8169_irq_mask_and_ack(ioaddr); 4342 rtl8169_irq_mask_and_ack(tp);
4343 4343
4344 rtl_rx_close(tp); 4344 rtl_rx_close(tp);
4345 4345
@@ -4885,8 +4885,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
4885 RTL_W16(IntrMitigate, 0x5151); 4885 RTL_W16(IntrMitigate, 0x5151);
4886 4886
4887 /* Work around for RxFIFO overflow. */ 4887 /* Work around for RxFIFO overflow. */
4888 if (tp->mac_version == RTL_GIGA_MAC_VER_11 || 4888 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
4889 tp->mac_version == RTL_GIGA_MAC_VER_22) {
4890 tp->intr_event |= RxFIFOOver | PCSTimeout; 4889 tp->intr_event |= RxFIFOOver | PCSTimeout;
4891 tp->intr_event &= ~RxOverflow; 4890 tp->intr_event &= ~RxOverflow;
4892 } 4891 }
@@ -5076,6 +5075,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
5076 void __iomem *ioaddr = tp->mmio_addr; 5075 void __iomem *ioaddr = tp->mmio_addr;
5077 struct pci_dev *pdev = tp->pci_dev; 5076 struct pci_dev *pdev = tp->pci_dev;
5078 5077
5078 if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
5079 tp->intr_event &= ~RxFIFOOver;
5080 tp->napi_event &= ~RxFIFOOver;
5081 }
5082
5079 if (tp->mac_version == RTL_GIGA_MAC_VER_13 || 5083 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5080 tp->mac_version == RTL_GIGA_MAC_VER_16) { 5084 tp->mac_version == RTL_GIGA_MAC_VER_16) {
5081 int cap = pci_pcie_cap(pdev); 5085 int cap = pci_pcie_cap(pdev);
@@ -5342,7 +5346,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
5342 /* Wait for any pending NAPI task to complete */ 5346 /* Wait for any pending NAPI task to complete */
5343 napi_disable(&tp->napi); 5347 napi_disable(&tp->napi);
5344 5348
5345 rtl8169_irq_mask_and_ack(ioaddr); 5349 rtl8169_irq_mask_and_ack(tp);
5346 5350
5347 tp->intr_mask = 0xffff; 5351 tp->intr_mask = 0xffff;
5348 RTL_W16(IntrMask, tp->intr_event); 5352 RTL_W16(IntrMask, tp->intr_event);
@@ -5389,14 +5393,16 @@ static void rtl8169_reset_task(struct work_struct *work)
5389 if (!netif_running(dev)) 5393 if (!netif_running(dev))
5390 goto out_unlock; 5394 goto out_unlock;
5391 5395
5396 rtl8169_hw_reset(tp);
5397
5392 rtl8169_wait_for_quiescence(dev); 5398 rtl8169_wait_for_quiescence(dev);
5393 5399
5394 for (i = 0; i < NUM_RX_DESC; i++) 5400 for (i = 0; i < NUM_RX_DESC; i++)
5395 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); 5401 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
5396 5402
5397 rtl8169_tx_clear(tp); 5403 rtl8169_tx_clear(tp);
5404 rtl8169_init_ring_indexes(tp);
5398 5405
5399 rtl8169_hw_reset(tp);
5400 rtl_hw_start(dev); 5406 rtl_hw_start(dev);
5401 netif_wake_queue(dev); 5407 netif_wake_queue(dev);
5402 rtl8169_check_link_status(dev, tp, tp->mmio_addr); 5408 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5407,11 +5413,6 @@ out_unlock:
5407 5413
5408static void rtl8169_tx_timeout(struct net_device *dev) 5414static void rtl8169_tx_timeout(struct net_device *dev)
5409{ 5415{
5410 struct rtl8169_private *tp = netdev_priv(dev);
5411
5412 rtl8169_hw_reset(tp);
5413
5414 /* Let's wait a bit while any (async) irq lands on */
5415 rtl8169_schedule_work(dev, rtl8169_reset_task); 5416 rtl8169_schedule_work(dev, rtl8169_reset_task);
5416} 5417}
5417 5418
@@ -5804,6 +5805,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
5804 */ 5805 */
5805 status = RTL_R16(IntrStatus); 5806 status = RTL_R16(IntrStatus);
5806 while (status && status != 0xffff) { 5807 while (status && status != 0xffff) {
5808 status &= tp->intr_event;
5809 if (!status)
5810 break;
5811
5807 handled = 1; 5812 handled = 1;
5808 5813
5809 /* Handle all of the error cases first. These will reset 5814 /* Handle all of the error cases first. These will reset
@@ -5818,27 +5823,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
5818 switch (tp->mac_version) { 5823 switch (tp->mac_version) {
5819 /* Work around for rx fifo overflow */ 5824 /* Work around for rx fifo overflow */
5820 case RTL_GIGA_MAC_VER_11: 5825 case RTL_GIGA_MAC_VER_11:
5821 case RTL_GIGA_MAC_VER_22:
5822 case RTL_GIGA_MAC_VER_26:
5823 netif_stop_queue(dev); 5826 netif_stop_queue(dev);
5824 rtl8169_tx_timeout(dev); 5827 rtl8169_tx_timeout(dev);
5825 goto done; 5828 goto done;
5826 /* Testers needed. */
5827 case RTL_GIGA_MAC_VER_17:
5828 case RTL_GIGA_MAC_VER_19:
5829 case RTL_GIGA_MAC_VER_20:
5830 case RTL_GIGA_MAC_VER_21:
5831 case RTL_GIGA_MAC_VER_23:
5832 case RTL_GIGA_MAC_VER_24:
5833 case RTL_GIGA_MAC_VER_27:
5834 case RTL_GIGA_MAC_VER_28:
5835 case RTL_GIGA_MAC_VER_31:
5836 /* Experimental science. Pktgen proof. */
5837 case RTL_GIGA_MAC_VER_12:
5838 case RTL_GIGA_MAC_VER_25:
5839 if (status == RxFIFOOver)
5840 goto done;
5841 break;
5842 default: 5829 default:
5843 break; 5830 break;
5844 } 5831 }
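
Among the r8169 changes above, the interrupt handler now masks the raw IntrStatus value against tp->intr_event before acting on it, so status bits the driver never enabled no longer steer it into the RxFIFO-overflow workaround path. A stripped-down sketch of that filtering step (the bit values below are invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define RX_OK        0x0001
    #define TX_OK        0x0004
    #define RX_FIFO_OVER 0x0040   /* not enabled on this hypothetical chip */

    int main(void)
    {
        uint16_t intr_event = RX_OK | TX_OK;    /* bits the driver enabled */
        uint16_t status = RX_OK | RX_FIFO_OVER; /* what the hardware latched */

        status &= intr_event;                   /* drop bits we never asked for */
        if (!status) {
            printf("nothing we care about, bail out\n");
            return 0;
        }
        if (status & RX_OK)
            printf("handle RX completions\n");
        if (status & TX_OK)
            printf("handle TX completions\n");
        return 0;
    }
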
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index d2be42aafbef..8843071fe987 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1937,6 +1937,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
1937{ 1937{
1938 struct smsc911x_data *pdata = netdev_priv(dev); 1938 struct smsc911x_data *pdata = netdev_priv(dev);
1939 unsigned int byte_test; 1939 unsigned int byte_test;
1940 unsigned int to = 100;
1940 1941
1941 SMSC_TRACE(pdata, probe, "Driver Parameters:"); 1942 SMSC_TRACE(pdata, probe, "Driver Parameters:");
1942 SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX", 1943 SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX",
@@ -1952,6 +1953,17 @@ static int __devinit smsc911x_init(struct net_device *dev)
1952 return -ENODEV; 1953 return -ENODEV;
1953 } 1954 }
1954 1955
1956 /*
1957 * poll the READY bit in PMT_CTRL. Any other access to the device is
1958 * forbidden while this bit isn't set. Try for 100ms
1959 */
1960 while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
1961 udelay(1000);
1962 if (to == 0) {
1963 pr_err("Device not READY in 100ms aborting\n");
1964 return -ENODEV;
1965 }
1966
1955 /* Check byte ordering */ 1967 /* Check byte ordering */
1956 byte_test = smsc911x_reg_read(pdata, BYTE_TEST); 1968 byte_test = smsc911x_reg_read(pdata, BYTE_TEST);
1957 SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test); 1969 SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test);
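
The smsc911x probe above now polls the READY bit in PMT_CTRL for up to 100 ms before touching any other register. The same bounded-poll shape outside the kernel, with the register read stubbed out (the helper name and timings here are illustrative, not the driver's):

    #include <stdio.h>
    #include <unistd.h>

    #define PMT_CTRL_READY 0x0001

    /* Stand-in for a device register read; becomes READY after a few calls. */
    static unsigned int fake_read_pmt_ctrl(void)
    {
        static int calls;
        return (++calls > 3) ? PMT_CTRL_READY : 0;
    }

    int main(void)
    {
        int to = 100;                           /* ~100 ms budget */

        while (!(fake_read_pmt_ctrl() & PMT_CTRL_READY) && --to)
            usleep(1000);                       /* 1 ms per attempt */

        if (to == 0) {
            fprintf(stderr, "device not READY within 100 ms, aborting\n");
            return 1;
        }
        printf("device ready after %d ms\n", 100 - to);
        return 0;
    }
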
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index da66ac511c4c..4d5402a1d262 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -39,10 +39,11 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
39 /* DMA SW reset */ 39 /* DMA SW reset */
40 value |= DMA_BUS_MODE_SFT_RESET; 40 value |= DMA_BUS_MODE_SFT_RESET;
41 writel(value, ioaddr + DMA_BUS_MODE); 41 writel(value, ioaddr + DMA_BUS_MODE);
42 limit = 15000; 42 limit = 10;
43 while (limit--) { 43 while (limit--) {
44 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) 44 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
45 break; 45 break;
46 mdelay(10);
46 } 47 }
47 if (limit < 0) 48 if (limit < 0)
48 return -EBUSY; 49 return -EBUSY;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 627f656b0f3c..bc17fd08b55d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -41,10 +41,11 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
41 /* DMA SW reset */ 41 /* DMA SW reset */
42 value |= DMA_BUS_MODE_SFT_RESET; 42 value |= DMA_BUS_MODE_SFT_RESET;
43 writel(value, ioaddr + DMA_BUS_MODE); 43 writel(value, ioaddr + DMA_BUS_MODE);
44 limit = 15000; 44 limit = 10;
45 while (limit--) { 45 while (limit--) {
46 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) 46 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
47 break; 47 break;
48 mdelay(10);
48 } 49 }
49 if (limit < 0) 50 if (limit < 0)
50 return -EBUSY; 51 return -EBUSY;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 9bafa6cf9e8b..a140a8fbf051 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -72,7 +72,6 @@ struct stmmac_priv {
72 spinlock_t lock; 72 spinlock_t lock;
73 spinlock_t tx_lock; 73 spinlock_t tx_lock;
74 int wolopts; 74 int wolopts;
75 int wolenabled;
76 int wol_irq; 75 int wol_irq;
77#ifdef CONFIG_STMMAC_TIMER 76#ifdef CONFIG_STMMAC_TIMER
78 struct stmmac_timer *tm; 77 struct stmmac_timer *tm;
@@ -80,6 +79,7 @@ struct stmmac_priv {
80 struct plat_stmmacenet_data *plat; 79 struct plat_stmmacenet_data *plat;
81 struct stmmac_counters mmc; 80 struct stmmac_counters mmc;
82 struct dma_features dma_cap; 81 struct dma_features dma_cap;
82 int hw_cap_support;
83}; 83};
84 84
85extern int stmmac_mdio_unregister(struct net_device *ndev); 85extern int stmmac_mdio_unregister(struct net_device *ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e8eff09bbbd7..0395f9eba801 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -430,6 +430,12 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
430 struct stmmac_priv *priv = netdev_priv(dev); 430 struct stmmac_priv *priv = netdev_priv(dev);
431 u32 support = WAKE_MAGIC | WAKE_UCAST; 431 u32 support = WAKE_MAGIC | WAKE_UCAST;
432 432
433 /* By default almost all GMAC devices support the WoL via
434 * magic frame but we can disable it if the HW capability
435 * register shows no support for pmt_magic_frame. */
436 if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
437 wol->wolopts &= ~WAKE_MAGIC;
438
433 if (!device_can_wakeup(priv->device)) 439 if (!device_can_wakeup(priv->device))
434 return -EINVAL; 440 return -EINVAL;
435 441
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 20546bbbb8db..72cd190b9c1a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -321,12 +321,10 @@ static int stmmac_init_phy(struct net_device *dev)
321 } 321 }
322 322
323 /* Stop Advertising 1000BASE Capability if interface is not GMII */ 323 /* Stop Advertising 1000BASE Capability if interface is not GMII */
324 if ((interface) && ((interface == PHY_INTERFACE_MODE_MII) || 324 if ((interface == PHY_INTERFACE_MODE_MII) ||
325 (interface == PHY_INTERFACE_MODE_RMII))) { 325 (interface == PHY_INTERFACE_MODE_RMII))
326 phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | 326 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
327 SUPPORTED_Asym_Pause); 327 SUPPORTED_1000baseT_Full);
328 phydev->advertising = phydev->supported;
329 }
330 328
331 /* 329 /*
332 * Broken HW is sometimes missing the pull-up resistor on the 330 * Broken HW is sometimes missing the pull-up resistor on the
@@ -783,10 +781,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
783 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 781 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
784 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 782 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
785 783
786 /* Do not manage MMC IRQ (FIXME) */ 784 /* Mask MMC irq, counters are managed in SW and registers
785 * are cleared on each READ eventually. */
787 dwmac_mmc_intr_all_mask(priv->ioaddr); 786 dwmac_mmc_intr_all_mask(priv->ioaddr);
788 dwmac_mmc_ctrl(priv->ioaddr, mode); 787
789 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 788 if (priv->dma_cap.rmon) {
789 dwmac_mmc_ctrl(priv->ioaddr, mode);
790 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
791 } else
792 pr_info(" No MAC Management Counters available");
790} 793}
791 794
792static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) 795static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
@@ -807,8 +810,29 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
807 return 0; 810 return 0;
808} 811}
809 812
810/* New GMAC chips support a new register to indicate the 813/**
811 * presence of the optional feature/functions. 814 * stmmac_selec_desc_mode
 815 * @priv : private device pointer
816 * Description: select the Enhanced/Alternate or Normal descriptors */
817static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
818{
819 if (priv->plat->enh_desc) {
820 pr_info(" Enhanced/Alternate descriptors\n");
821 priv->hw->desc = &enh_desc_ops;
822 } else {
823 pr_info(" Normal descriptors\n");
824 priv->hw->desc = &ndesc_ops;
825 }
826}
827
828/**
829 * stmmac_get_hw_features
830 * @priv : private device pointer
831 * Description:
832 * new GMAC chip generations have a new register to indicate the
833 * presence of the optional feature/functions.
 834 * This can also be used to override the value passed through the
 835 * platform, which is necessary for old MAC10/100 and GMAC chips.
812 */ 836 */
813static int stmmac_get_hw_features(struct stmmac_priv *priv) 837static int stmmac_get_hw_features(struct stmmac_priv *priv)
814{ 838{
@@ -829,7 +853,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
829 (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9; 853 (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
830 priv->dma_cap.pmt_magic_frame = 854 priv->dma_cap.pmt_magic_frame =
831 (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10; 855 (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
832 /*MMC*/ 856 /* MMC */
833 priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11; 857 priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
834 /* IEEE 1588-2002*/ 858 /* IEEE 1588-2002*/
835 priv->dma_cap.time_stamp = 859 priv->dma_cap.time_stamp =
@@ -857,8 +881,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
857 priv->dma_cap.enh_desc = 881 priv->dma_cap.enh_desc =
858 (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; 882 (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
859 883
860 } else 884 }
861 pr_debug("\tNo HW DMA feature register supported");
862 885
863 return hw_cap; 886 return hw_cap;
864} 887}
@@ -913,6 +936,44 @@ static int stmmac_open(struct net_device *dev)
913 goto open_error; 936 goto open_error;
914 } 937 }
915 938
939 stmmac_get_synopsys_id(priv);
940
941 priv->hw_cap_support = stmmac_get_hw_features(priv);
942
943 if (priv->hw_cap_support) {
944 pr_info(" Support DMA HW capability register");
945
946 /* We can override some gmac/dma configuration fields: e.g.
947 * enh_desc, tx_coe (e.g. that are passed through the
948 * platform) with the values from the HW capability
949 * register (if supported).
950 */
951 priv->plat->enh_desc = priv->dma_cap.enh_desc;
952 priv->plat->tx_coe = priv->dma_cap.tx_coe;
953 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
954
955 /* By default disable wol on magic frame if not supported */
956 if (!priv->dma_cap.pmt_magic_frame)
957 priv->wolopts &= ~WAKE_MAGIC;
958
959 } else
960 pr_info(" No HW DMA feature register supported");
961
 962 /* Select the enhanced/normal descriptor structures */
963 stmmac_selec_desc_mode(priv);
964
965 /* PMT module is not integrated in all the MAC devices. */
966 if (priv->plat->pmt) {
967 pr_info(" Remote wake-up capable\n");
968 device_set_wakeup_capable(priv->device, 1);
969 }
970
971 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
972 if (priv->rx_coe)
973 pr_info(" Checksum Offload Engine supported\n");
974 if (priv->plat->tx_coe)
975 pr_info(" Checksum insertion supported\n");
976
916 /* Create and initialize the TX/RX descriptors chains. */ 977 /* Create and initialize the TX/RX descriptors chains. */
917 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); 978 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
918 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); 979 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
@@ -935,15 +996,6 @@ static int stmmac_open(struct net_device *dev)
935 /* Initialize the MAC Core */ 996 /* Initialize the MAC Core */
936 priv->hw->mac->core_init(priv->ioaddr); 997 priv->hw->mac->core_init(priv->ioaddr);
937 998
938 stmmac_get_synopsys_id(priv);
939
940 stmmac_get_hw_features(priv);
941
942 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
943 if (priv->rx_coe)
944 pr_info("stmmac: Rx Checksum Offload Engine supported\n");
945 if (priv->plat->tx_coe)
946 pr_info("\tTX Checksum insertion supported\n");
947 netdev_update_features(dev); 999 netdev_update_features(dev);
948 1000
949 /* Request the IRQ lines */ 1001 /* Request the IRQ lines */
@@ -965,8 +1017,7 @@ static int stmmac_open(struct net_device *dev)
965 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); 1017 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
966 priv->xstats.threshold = tc; 1018 priv->xstats.threshold = tc;
967 1019
968 if (priv->dma_cap.rmon) 1020 stmmac_mmc_setup(priv);
969 stmmac_mmc_setup(priv);
970 1021
971 /* Start the ball rolling... */ 1022 /* Start the ball rolling... */
972 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); 1023 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
@@ -1489,9 +1540,7 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1489 if (!priv->phydev) 1540 if (!priv->phydev)
1490 return -EINVAL; 1541 return -EINVAL;
1491 1542
1492 spin_lock(&priv->lock);
1493 ret = phy_mii_ioctl(priv->phydev, rq, cmd); 1543 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
1494 spin_unlock(&priv->lock);
1495 1544
1496 return ret; 1545 return ret;
1497} 1546}
@@ -1558,7 +1607,7 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
1558 struct net_device *dev = seq->private; 1607 struct net_device *dev = seq->private;
1559 struct stmmac_priv *priv = netdev_priv(dev); 1608 struct stmmac_priv *priv = netdev_priv(dev);
1560 1609
1561 if (!stmmac_get_hw_features(priv)) { 1610 if (!priv->hw_cap_support) {
1562 seq_printf(seq, "DMA HW features not supported\n"); 1611 seq_printf(seq, "DMA HW features not supported\n");
1563 return 0; 1612 return 0;
1564 } 1613 }
@@ -1766,12 +1815,6 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1766 if (!device) 1815 if (!device)
1767 return -ENOMEM; 1816 return -ENOMEM;
1768 1817
1769 if (priv->plat->enh_desc) {
1770 device->desc = &enh_desc_ops;
1771 pr_info("\tEnhanced descriptor structure\n");
1772 } else
1773 device->desc = &ndesc_ops;
1774
1775 priv->hw = device; 1818 priv->hw = device;
1776 priv->hw->ring = &ring_mode_ops; 1819 priv->hw->ring = &ring_mode_ops;
1777 1820
@@ -1845,11 +1888,6 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1845 1888
1846 priv->ioaddr = addr; 1889 priv->ioaddr = addr;
1847 1890
1848 /* PMT module is not integrated in all the MAC devices. */
1849 if (plat_dat->pmt) {
1850 pr_info("\tPMT module supported\n");
1851 device_set_wakeup_capable(&pdev->dev, 1);
1852 }
1853 /* 1891 /*
1854 * On some platforms e.g. SPEAr the wake up irq differs from the mac irq 1892 * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
1855 * The external wake up irq can be passed through the platform code 1893 * The external wake up irq can be passed through the platform code
@@ -1862,7 +1900,6 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1862 if (priv->wol_irq == -ENXIO) 1900 if (priv->wol_irq == -ENXIO)
1863 priv->wol_irq = ndev->irq; 1901 priv->wol_irq = ndev->irq;
1864 1902
1865
1866 platform_set_drvdata(pdev, ndev); 1903 platform_set_drvdata(pdev, ndev);
1867 1904
1868 /* Set the I/O base addr */ 1905 /* Set the I/O base addr */
@@ -1875,7 +1912,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1875 goto out_free_ndev; 1912 goto out_free_ndev;
1876 } 1913 }
1877 1914
1878 /* MAC HW revice detection */ 1915 /* MAC HW device detection */
1879 ret = stmmac_mac_device_setup(ndev); 1916 ret = stmmac_mac_device_setup(ndev);
1880 if (ret < 0) 1917 if (ret < 0)
1881 goto out_plat_exit; 1918 goto out_plat_exit;
@@ -1978,12 +2015,13 @@ static int stmmac_suspend(struct device *dev)
1978 if (!ndev || !netif_running(ndev)) 2015 if (!ndev || !netif_running(ndev))
1979 return 0; 2016 return 0;
1980 2017
2018 if (priv->phydev)
2019 phy_stop(priv->phydev);
2020
1981 spin_lock(&priv->lock); 2021 spin_lock(&priv->lock);
1982 2022
1983 netif_device_detach(ndev); 2023 netif_device_detach(ndev);
1984 netif_stop_queue(ndev); 2024 netif_stop_queue(ndev);
1985 if (priv->phydev)
1986 phy_stop(priv->phydev);
1987 2025
1988#ifdef CONFIG_STMMAC_TIMER 2026#ifdef CONFIG_STMMAC_TIMER
1989 priv->tm->timer_stop(); 2027 priv->tm->timer_stop();
@@ -2041,12 +2079,13 @@ static int stmmac_resume(struct device *dev)
2041#endif 2079#endif
2042 napi_enable(&priv->napi); 2080 napi_enable(&priv->napi);
2043 2081
2044 if (priv->phydev)
2045 phy_start(priv->phydev);
2046
2047 netif_start_queue(ndev); 2082 netif_start_queue(ndev);
2048 2083
2049 spin_unlock(&priv->lock); 2084 spin_unlock(&priv->lock);
2085
2086 if (priv->phydev)
2087 phy_start(priv->phydev);
2088
2050 return 0; 2089 return 0;
2051} 2090}
2052 2091
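
The stmmac rework above reads the DMA HW capability register once, caches the result in priv->hw_cap_support, and uses it to override platform data (enh_desc, tx_coe, pmt) and to drop WAKE_MAGIC when the hardware cannot generate a magic-frame wake-up. The capability decode itself is mask-and-shift; the sketch below mirrors the bit positions visible in the hunk (bits 9, 10 and 11) but uses local stand-in names rather than the real DMA_HW_FEAT_* macros:

    #include <stdio.h>
    #include <stdint.h>

    #define FEAT_RWKSEL  (1u << 9)    /* remote wake-up */
    #define FEAT_MGKSEL  (1u << 10)   /* magic-frame wake-up */
    #define FEAT_MMCSEL  (1u << 11)   /* MMC counters */

    struct dma_cap {
        unsigned int pmt_remote_wake_up:1;
        unsigned int pmt_magic_frame:1;
        unsigned int rmon:1;
    };

    int main(void)
    {
        uint32_t hw_cap = FEAT_RWKSEL | FEAT_MMCSEL;   /* no magic-frame support */
        struct dma_cap cap = {
            .pmt_remote_wake_up = !!(hw_cap & FEAT_RWKSEL),
            .pmt_magic_frame    = !!(hw_cap & FEAT_MGKSEL),
            .rmon               = !!(hw_cap & FEAT_MMCSEL),
        };

        printf("remote wake: %u, magic frame: %u, mmc: %u\n",
               cap.pmt_remote_wake_up, cap.pmt_magic_frame, cap.rmon);
        return 0;
    }
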
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index c517dac02ae1..cf14ab9db576 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2637,7 +2637,7 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
2637 sbus_dp = op->dev.parent->of_node; 2637 sbus_dp = op->dev.parent->of_node;
2638 2638
2639 /* We can match PCI devices too, do not accept those here. */ 2639 /* We can match PCI devices too, do not accept those here. */
2640 if (strcmp(sbus_dp->name, "sbus")) 2640 if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi"))
2641 return err; 2641 return err;
2642 2642
2643 if (is_qfe) { 2643 if (is_qfe) {
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 10826d8a2a2d..1187a1169eb2 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
926 goto done; 926 goto done;
927 927
928 /* Re-enable the ingress interrupt. */ 928 /* Re-enable the ingress interrupt. */
929 enable_percpu_irq(priv->intr_id); 929 enable_percpu_irq(priv->intr_id, 0);
930 930
931 /* HACK: Avoid the "rotting packet" problem (see above). */ 931 /* HACK: Avoid the "rotting packet" problem (see above). */
932 if (qup->__packet_receive_read != 932 if (qup->__packet_receive_read !=
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
1296 info->napi_enabled = true; 1296 info->napi_enabled = true;
1297 1297
1298 /* Enable the ingress interrupt. */ 1298 /* Enable the ingress interrupt. */
1299 enable_percpu_irq(priv->intr_id); 1299 enable_percpu_irq(priv->intr_id, 0);
1300} 1300}
1301 1301
1302 1302
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1697 for (i = 0; i < sh->nr_frags; i++) { 1697 for (i = 0; i < sh->nr_frags; i++) {
1698 1698
1699 skb_frag_t *f = &sh->frags[i]; 1699 skb_frag_t *f = &sh->frags[i];
1700 unsigned long pfn = page_to_pfn(f->page); 1700 unsigned long pfn = page_to_pfn(skb_frag_page(f));
1701 1701
1702 /* FIXME: Compute "hash_for_home" properly. */ 1702 /* FIXME: Compute "hash_for_home" properly. */
1703 /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ 1703 /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1706 /* FIXME: Hmmm. */ 1706 /* FIXME: Hmmm. */
1707 if (!hash_default) { 1707 if (!hash_default) {
1708 void *va = pfn_to_kaddr(pfn) + f->page_offset; 1708 void *va = pfn_to_kaddr(pfn) + f->page_offset;
1709 BUG_ON(PageHighMem(f->page)); 1709 BUG_ON(PageHighMem(skb_frag_page(f)));
1710 finv_buffer_remote(va, f->size, 0); 1710 finv_buffer_remote(va, f->size, 0);
1711 } 1711 }
1712 1712
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index caf3659e173c..2681b53820ee 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -114,6 +114,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
114 return; 114 return;
115 temac_iow(lp, XTE_LSW0_OFFSET, value); 115 temac_iow(lp, XTE_LSW0_OFFSET, value);
116 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg); 116 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
117 temac_indirect_busywait(lp);
117} 118}
118 119
119/** 120/**
@@ -203,6 +204,9 @@ static void temac_dma_bd_release(struct net_device *ndev)
203 struct temac_local *lp = netdev_priv(ndev); 204 struct temac_local *lp = netdev_priv(ndev);
204 int i; 205 int i;
205 206
207 /* Reset Local Link (DMA) */
208 lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
209
206 for (i = 0; i < RX_BD_NUM; i++) { 210 for (i = 0; i < RX_BD_NUM; i++) {
207 if (!lp->rx_skb[i]) 211 if (!lp->rx_skb[i])
208 break; 212 break;
@@ -860,6 +864,8 @@ static int temac_open(struct net_device *ndev)
860 phy_start(lp->phy_dev); 864 phy_start(lp->phy_dev);
861 } 865 }
862 866
867 temac_device_reset(ndev);
868
863 rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev); 869 rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
864 if (rc) 870 if (rc)
865 goto err_tx_irq; 871 goto err_tx_irq;
@@ -867,7 +873,6 @@ static int temac_open(struct net_device *ndev)
867 if (rc) 873 if (rc)
868 goto err_rx_irq; 874 goto err_rx_irq;
869 875
870 temac_device_reset(ndev);
871 return 0; 876 return 0;
872 877
873 err_rx_irq: 878 err_rx_irq:
diff --git a/drivers/net/hippi/Kconfig b/drivers/net/hippi/Kconfig
index 7393eb732ee6..95eb34fdbba7 100644
--- a/drivers/net/hippi/Kconfig
+++ b/drivers/net/hippi/Kconfig
@@ -36,4 +36,4 @@ config ROADRUNNER_LARGE_RINGS
36 kernel code or by user space programs. Say Y here only if you have 36 kernel code or by user space programs. Say Y here only if you have
37 the memory. 37 the memory.
38 38
39endif /* HIPPI */ 39endif # HIPPI
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index bb88e12101c7..a70244306c94 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5menuconfig PHYLIB 5menuconfig PHYLIB
6 bool "PHY Device support and infrastructure" 6 tristate "PHY Device support and infrastructure"
7 depends on !S390 7 depends on !S390
8 depends on NETDEVICES 8 depends on NETDEVICES
9 help 9 help
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 89f829f5f725..f8a6853b692e 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
423 lock_sock(sk); 423 lock_sock(sk);
424 424
425 opt->src_addr = sp->sa_addr.pptp; 425 opt->src_addr = sp->sa_addr.pptp;
426 if (add_chan(po)) { 426 if (add_chan(po))
427 release_sock(sk);
428 error = -EBUSY; 427 error = -EBUSY;
429 }
430 428
431 release_sock(sk); 429 release_sock(sk);
432 return error; 430 return error;
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index e81e22e3d1d2..e6fed4d4cb77 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -36,7 +36,7 @@
36#include <linux/usb/usbnet.h> 36#include <linux/usb/usbnet.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38 38
39#define DRIVER_VERSION "26-Sep-2011" 39#define DRIVER_VERSION "08-Nov-2011"
40#define DRIVER_NAME "asix" 40#define DRIVER_NAME "asix"
41 41
42/* ASIX AX8817X based USB 2.0 Ethernet Devices */ 42/* ASIX AX8817X based USB 2.0 Ethernet Devices */
@@ -163,7 +163,7 @@
163#define MARVELL_CTRL_TXDELAY 0x0002 163#define MARVELL_CTRL_TXDELAY 0x0002
164#define MARVELL_CTRL_RXDELAY 0x0080 164#define MARVELL_CTRL_RXDELAY 0x0080
165 165
166#define PHY_MODE_RTL8211CL 0x0004 166#define PHY_MODE_RTL8211CL 0x000C
167 167
168/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */ 168/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
169struct asix_data { 169struct asix_data {
@@ -652,9 +652,17 @@ static u32 asix_get_phyid(struct usbnet *dev)
652{ 652{
653 int phy_reg; 653 int phy_reg;
654 u32 phy_id; 654 u32 phy_id;
655 int i;
655 656
656 phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1); 657 /* Poll for the rare case the FW or phy isn't ready yet. */
657 if (phy_reg < 0) 658 for (i = 0; i < 100; i++) {
659 phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
660 if (phy_reg != 0 && phy_reg != 0xFFFF)
661 break;
662 mdelay(1);
663 }
664
665 if (phy_reg <= 0 || phy_reg == 0xFFFF)
658 return 0; 666 return 0;
659 667
660 phy_id = (phy_reg & 0xffff) << 16; 668 phy_id = (phy_reg & 0xffff) << 16;
@@ -1075,7 +1083,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
1075 1083
1076static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) 1084static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
1077{ 1085{
1078 int ret; 1086 int ret, embd_phy;
1079 struct asix_data *data = (struct asix_data *)&dev->data; 1087 struct asix_data *data = (struct asix_data *)&dev->data;
1080 u8 buf[ETH_ALEN]; 1088 u8 buf[ETH_ALEN];
1081 u32 phyid; 1089 u32 phyid;
@@ -1100,16 +1108,36 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
1100 dev->mii.reg_num_mask = 0x1f; 1108 dev->mii.reg_num_mask = 0x1f;
1101 dev->mii.phy_id = asix_get_phy_addr(dev); 1109 dev->mii.phy_id = asix_get_phy_addr(dev);
1102 1110
1103 phyid = asix_get_phyid(dev);
1104 dbg("PHYID=0x%08x", phyid);
1105
1106 dev->net->netdev_ops = &ax88772_netdev_ops; 1111 dev->net->netdev_ops = &ax88772_netdev_ops;
1107 dev->net->ethtool_ops = &ax88772_ethtool_ops; 1112 dev->net->ethtool_ops = &ax88772_ethtool_ops;
1108 1113
1109 ret = ax88772_reset(dev); 1114 embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
1115
1116 /* Reset the PHY to normal operation mode */
1117 ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
1118 if (ret < 0) {
1119 dbg("Select PHY #1 failed: %d", ret);
1120 return ret;
1121 }
1122
1123 ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
1124 if (ret < 0)
1125 return ret;
1126
1127 msleep(150);
1128
1129 ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
1110 if (ret < 0) 1130 if (ret < 0)
1111 return ret; 1131 return ret;
1112 1132
1133 msleep(150);
1134
1135 ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
1136
1137 /* Read PHYID register *AFTER* the PHY was reset properly */
1138 phyid = asix_get_phyid(dev);
1139 dbg("PHYID=0x%08x", phyid);
1140
1113 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ 1141 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
1114 if (dev->driver_info->flags & FLAG_FRAMING_AX) { 1142 if (dev->driver_info->flags & FLAG_FRAMING_AX) {
1115 /* hard_mtu is still the default - the device does not support 1143 /* hard_mtu is still the default - the device does not support
@@ -1220,6 +1248,7 @@ static int ax88178_reset(struct usbnet *dev)
1220 __le16 eeprom; 1248 __le16 eeprom;
1221 u8 status; 1249 u8 status;
1222 int gpio0 = 0; 1250 int gpio0 = 0;
1251 u32 phyid;
1223 1252
1224 asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status); 1253 asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
1225 dbg("GPIO Status: 0x%04x", status); 1254 dbg("GPIO Status: 0x%04x", status);
@@ -1235,12 +1264,13 @@ static int ax88178_reset(struct usbnet *dev)
1235 data->ledmode = 0; 1264 data->ledmode = 0;
1236 gpio0 = 1; 1265 gpio0 = 1;
1237 } else { 1266 } else {
1238 data->phymode = le16_to_cpu(eeprom) & 7; 1267 data->phymode = le16_to_cpu(eeprom) & 0x7F;
1239 data->ledmode = le16_to_cpu(eeprom) >> 8; 1268 data->ledmode = le16_to_cpu(eeprom) >> 8;
1240 gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1; 1269 gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
1241 } 1270 }
1242 dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode); 1271 dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode);
1243 1272
1273 /* Power up external GigaPHY through AX88178 GPIO pin */
1244 asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40); 1274 asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
1245 if ((le16_to_cpu(eeprom) >> 8) != 1) { 1275 if ((le16_to_cpu(eeprom) >> 8) != 1) {
1246 asix_write_gpio(dev, 0x003c, 30); 1276 asix_write_gpio(dev, 0x003c, 30);
@@ -1252,6 +1282,13 @@ static int ax88178_reset(struct usbnet *dev)
1252 asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30); 1282 asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
1253 } 1283 }
1254 1284
1285 /* Read PHYID register *AFTER* powering up PHY */
1286 phyid = asix_get_phyid(dev);
1287 dbg("PHYID=0x%08x", phyid);
1288
1289 /* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */
1290 asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL);
1291
1255 asix_sw_reset(dev, 0); 1292 asix_sw_reset(dev, 0);
1256 msleep(150); 1293 msleep(150);
1257 1294
@@ -1396,7 +1433,6 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
1396{ 1433{
1397 int ret; 1434 int ret;
1398 u8 buf[ETH_ALEN]; 1435 u8 buf[ETH_ALEN];
1399 u32 phyid;
1400 struct asix_data *data = (struct asix_data *)&dev->data; 1436 struct asix_data *data = (struct asix_data *)&dev->data;
1401 1437
1402 data->eeprom_len = AX88772_EEPROM_LEN; 1438 data->eeprom_len = AX88772_EEPROM_LEN;
@@ -1423,12 +1459,12 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
1423 dev->net->netdev_ops = &ax88178_netdev_ops; 1459 dev->net->netdev_ops = &ax88178_netdev_ops;
1424 dev->net->ethtool_ops = &ax88178_ethtool_ops; 1460 dev->net->ethtool_ops = &ax88178_ethtool_ops;
1425 1461
1426 phyid = asix_get_phyid(dev); 1462 /* Blink LEDS so users know driver saw dongle */
1427 dbg("PHYID=0x%08x", phyid); 1463 asix_sw_reset(dev, 0);
1464 msleep(150);
1428 1465
1429 ret = ax88178_reset(dev); 1466 asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
1430 if (ret < 0) 1467 msleep(150);
1431 return ret;
1432 1468
1433 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ 1469 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
1434 if (dev->driver_info->flags & FLAG_FRAMING_AX) { 1470 if (dev->driver_info->flags & FLAG_FRAMING_AX) {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c924ea2bce07..99ed6eb4dfaf 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -567,7 +567,7 @@ static const struct usb_device_id products [] = {
567{ 567{
568 USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, 568 USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
569 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 569 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
570 .driver_info = (unsigned long)&wwan_info, 570 .driver_info = 0,
571}, 571},
572 572
573/* 573/*
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index d43db32f9478..9c26c6390d69 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -144,10 +144,11 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
144 } 144 }
145 145
146 frame = (struct vl600_frame_hdr *) buf->data; 146 frame = (struct vl600_frame_hdr *) buf->data;
147 /* NOTE: Should check that frame->magic == 0x53544448? 147 /* Yes, check that frame->magic == 0x53544448 (or 0x44544d48),
148 * Otherwise if we receive garbage at the beginning of the frame 148 * otherwise we may run out of memory w/a bad packet */
149 * we may end up allocating a huge buffer and saving all the 149 if (ntohl(frame->magic) != 0x53544448 &&
150 * future incoming data into it. */ 150 ntohl(frame->magic) != 0x44544d48)
151 goto error;
151 152
152 if (buf->len < sizeof(*frame) || 153 if (buf->len < sizeof(*frame) ||
153 buf->len != le32_to_cpup(&frame->len)) { 154 buf->len != le32_to_cpup(&frame->len)) {
@@ -296,6 +297,11 @@ encapsulate:
296 * overwrite the remaining fields. 297 * overwrite the remaining fields.
297 */ 298 */
298 packet = (struct vl600_pkt_hdr *) skb->data; 299 packet = (struct vl600_pkt_hdr *) skb->data;
300 /* The VL600 wants IPv6 packets to have an IPv4 ethertype
301 * Since this modem only supports IPv4 and IPv6, just set all
302 * frames to 0x0800 (ETH_P_IP)
303 */
304 packet->h_proto = htons(ETH_P_IP);
299 memset(&packet->dummy, 0, sizeof(packet->dummy)); 305 memset(&packet->dummy, 0, sizeof(packet->dummy));
300 packet->len = cpu_to_le32(orig_len); 306 packet->len = cpu_to_le32(orig_len);
301 307
@@ -308,21 +314,12 @@ encapsulate:
308 if (skb->len < full_len) /* Pad */ 314 if (skb->len < full_len) /* Pad */
309 skb_put(skb, full_len - skb->len); 315 skb_put(skb, full_len - skb->len);
310 316
311 /* The VL600 wants IPv6 packets to have an IPv4 ethertype
312 * Check if this is an IPv6 packet, and set the ethertype
313 * to 0x800
314 */
315 if ((skb->data[sizeof(struct vl600_pkt_hdr *) + 0x22] & 0xf0) == 0x60) {
316 skb->data[sizeof(struct vl600_pkt_hdr *) + 0x20] = 0x08;
317 skb->data[sizeof(struct vl600_pkt_hdr *) + 0x21] = 0;
318 }
319
320 return skb; 317 return skb;
321} 318}
322 319
323static const struct driver_info vl600_info = { 320static const struct driver_info vl600_info = {
324 .description = "LG VL600 modem", 321 .description = "LG VL600 modem",
325 .flags = FLAG_ETHER | FLAG_RX_ASSEMBLE, 322 .flags = FLAG_RX_ASSEMBLE | FLAG_WWAN,
326 .bind = vl600_bind, 323 .bind = vl600_bind,
327 .unbind = vl600_unbind, 324 .unbind = vl600_unbind,
328 .status = usbnet_cdc_status, 325 .status = usbnet_cdc_status,
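
The lg-vl600 receive fixup above now checks the frame magic (0x53544448 or 0x44544d48, as in the patch) before trusting the length field, so a corrupted header can no longer trigger a huge buffer allocation. A miniature version of that validate-before-use idea (the frame structure here is simplified and hypothetical):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>   /* ntohl, htonl */

    struct frame_hdr {
        uint32_t magic;      /* big-endian on the wire in this sketch */
        uint32_t len;
    };

    static int frame_ok(const struct frame_hdr *f)
    {
        uint32_t magic = ntohl(f->magic);
        return magic == 0x53544448 || magic == 0x44544d48;
    }

    int main(void)
    {
        struct frame_hdr good = { htonl(0x53544448), htonl(1500) };
        struct frame_hdr junk = { htonl(0xdeadbeef), htonl(0x7fffffff) };

        printf("good frame accepted: %d\n", frame_ok(&good));
        printf("junk frame accepted: %d (length never trusted)\n", frame_ok(&junk));
        return 0;
    }
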
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 22a7cf951e72..a5b9b12ef268 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -51,6 +51,7 @@
51#define USB_VENDOR_ID_SMSC (0x0424) 51#define USB_VENDOR_ID_SMSC (0x0424)
52#define USB_PRODUCT_ID_LAN7500 (0x7500) 52#define USB_PRODUCT_ID_LAN7500 (0x7500)
53#define USB_PRODUCT_ID_LAN7505 (0x7505) 53#define USB_PRODUCT_ID_LAN7505 (0x7505)
54#define RXW_PADDING 2
54 55
55#define check_warn(ret, fmt, args...) \ 56#define check_warn(ret, fmt, args...) \
56 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); }) 57 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -1088,13 +1089,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1088 1089
1089 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); 1090 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
1090 le32_to_cpus(&rx_cmd_b); 1091 le32_to_cpus(&rx_cmd_b);
1091 skb_pull(skb, 4 + NET_IP_ALIGN); 1092 skb_pull(skb, 4 + RXW_PADDING);
1092 1093
1093 packet = skb->data; 1094 packet = skb->data;
1094 1095
1095 /* get the packet length */ 1096 /* get the packet length */
1096 size = (rx_cmd_a & RX_CMD_A_LEN) - NET_IP_ALIGN; 1097 size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
1097 align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4; 1098 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
1098 1099
1099 if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { 1100 if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
1100 netif_dbg(dev, rx_err, dev->net, 1101 netif_dbg(dev, rx_err, dev->net,
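
The smsc75xx fix above introduces RXW_PADDING (2 bytes) instead of reusing NET_IP_ALIGN, which can be 0 on some architectures, so the receive length and alignment arithmetic now match what the hardware actually prepends. The rounding formula from the hunk is easy to check by hand:

    #include <stdio.h>

    #define RXW_PADDING 2

    int main(void)
    {
        /* For a few packet sizes, how many bytes pad the frame out to a
         * 4-byte boundary once the 2-byte receive padding is included. */
        unsigned int sizes[] = { 60, 61, 62, 63, 64 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
            unsigned int size = sizes[i];
            unsigned int align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

            printf("size %u -> align_count %u (total %u)\n",
                   size, align_count, size + RXW_PADDING + align_count);
        }
        return 0;
    }
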
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2f91acccb7db..8873c6e6fb96 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1827,7 +1827,8 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
1827 } 1827 }
1828 1828
1829 /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */ 1829 /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
1830 REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); 1830 if (AR_SREV_9300_20_OR_LATER(ah))
1831 REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
1831} 1832}
1832 1833
1833/* 1834/*
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 93fbe6f40898..d2348a5a7809 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -286,7 +286,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
286 ath_start_ani(common); 286 ath_start_ani(common);
287 } 287 }
288 288
289 if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) { 289 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
290 struct ath_hw_antcomb_conf div_ant_conf; 290 struct ath_hw_antcomb_conf div_ant_conf;
291 u8 lna_conf; 291 u8 lna_conf;
292 292
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 85fa9cc73502..65ecb5bab25a 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -254,6 +254,8 @@ ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
254 int r; 254 int r;
255 255
256 sband = wiphy->bands[IEEE80211_BAND_2GHZ]; 256 sband = wiphy->bands[IEEE80211_BAND_2GHZ];
257 if (!sband)
258 return;
257 259
258 /* 260 /*
259 * If no country IE has been received always enable active scan 261 * If no country IE has been received always enable active scan
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 58ea0e5fabfd..5f77cbe0b6aa 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -175,6 +175,7 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
175 } 175 }
176} 176}
177 177
178/* TODO: verify if needed for SSLPN or LCN */
178static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate) 179static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate)
179{ 180{
180 const struct b43_phy *phy = &dev->phy; 181 const struct b43_phy *phy = &dev->phy;
@@ -256,6 +257,9 @@ int b43_generate_txhdr(struct b43_wldev *dev,
256 unsigned int plcp_fragment_len; 257 unsigned int plcp_fragment_len;
257 u32 mac_ctl = 0; 258 u32 mac_ctl = 0;
258 u16 phy_ctl = 0; 259 u16 phy_ctl = 0;
260 bool fill_phy_ctl1 = (phy->type == B43_PHYTYPE_LP ||
261 phy->type == B43_PHYTYPE_N ||
262 phy->type == B43_PHYTYPE_HT);
259 u8 extra_ft = 0; 263 u8 extra_ft = 0;
260 struct ieee80211_rate *txrate; 264 struct ieee80211_rate *txrate;
261 struct ieee80211_tx_rate *rates; 265 struct ieee80211_tx_rate *rates;
@@ -531,7 +535,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
531 extra_ft |= B43_TXH_EFT_RTSFB_CCK; 535 extra_ft |= B43_TXH_EFT_RTSFB_CCK;
532 536
533 if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS && 537 if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS &&
534 phy->type == B43_PHYTYPE_N) { 538 fill_phy_ctl1) {
535 txhdr->phy_ctl1_rts = cpu_to_le16( 539 txhdr->phy_ctl1_rts = cpu_to_le16(
536 b43_generate_tx_phy_ctl1(dev, rts_rate)); 540 b43_generate_tx_phy_ctl1(dev, rts_rate));
537 txhdr->phy_ctl1_rts_fb = cpu_to_le16( 541 txhdr->phy_ctl1_rts_fb = cpu_to_le16(
@@ -552,7 +556,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
552 break; 556 break;
553 } 557 }
554 558
555 if (phy->type == B43_PHYTYPE_N) { 559 if (fill_phy_ctl1) {
556 txhdr->phy_ctl1 = 560 txhdr->phy_ctl1 =
557 cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate)); 561 cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate));
558 txhdr->phy_ctl1_fb = 562 txhdr->phy_ctl1_fb =
@@ -736,7 +740,14 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
736 740
737 /* Link quality statistics */ 741 /* Link quality statistics */
738 switch (chanstat & B43_RX_CHAN_PHYTYPE) { 742 switch (chanstat & B43_RX_CHAN_PHYTYPE) {
743 case B43_PHYTYPE_HT:
744 /* TODO: is max the right choice? */
745 status.signal = max_t(__s8,
746 max(rxhdr->phy_ht_power0, rxhdr->phy_ht_power1),
747 rxhdr->phy_ht_power2);
748 break;
739 case B43_PHYTYPE_N: 749 case B43_PHYTYPE_N:
750 /* Broadcom has code for min and avg, but always uses max */
740 if (rxhdr->power0 == 16 || rxhdr->power0 == 32) 751 if (rxhdr->power0 == 16 || rxhdr->power0 == 32)
741 status.signal = max(rxhdr->power1, rxhdr->power2); 752 status.signal = max(rxhdr->power1, rxhdr->power2);
742 else 753 else
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index 16c514d54afa..98d90747836a 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -249,6 +249,12 @@ struct b43_rxhdr_fw4 {
249 } __packed; 249 } __packed;
250 } __packed; 250 } __packed;
251 union { 251 union {
252 /* HT-PHY */
253 struct {
254 PAD_BYTES(1);
255 __s8 phy_ht_power0;
256 } __packed;
257
252 /* RSSI for N-PHYs */ 258 /* RSSI for N-PHYs */
253 struct { 259 struct {
254 __s8 power2; 260 __s8 power2;
@@ -257,7 +263,15 @@ struct b43_rxhdr_fw4 {
257 263
258 __le16 phy_status2; /* PHY RX Status 2 */ 264 __le16 phy_status2; /* PHY RX Status 2 */
259 } __packed; 265 } __packed;
260 __le16 phy_status3; /* PHY RX Status 3 */ 266 union {
267 /* HT-PHY */
268 struct {
269 __s8 phy_ht_power1;
270 __s8 phy_ht_power2;
271 } __packed;
272
273 __le16 phy_status3; /* PHY RX Status 3 */
274 } __packed;
261 union { 275 union {
262 /* Tested with 598.314, 644.1001 and 666.2 */ 276 /* Tested with 598.314, 644.1001 and 666.2 */
263 struct { 277 struct {
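
The xmit.h change above overlays the new HT-PHY power fields onto bytes the N-PHY view already occupies in the fixed-size RX header, using packed anonymous unions. Below is a minimal stand-alone sketch of that layout technique; the struct and field names are invented for illustration and are not the driver's real b43_rxhdr_fw4 layout.

/* Sketch: two per-PHY views sharing the same received bytes through a
 * packed anonymous union. Names are invented for this example. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct demo_rxhdr {
	uint8_t frame_len;
	union {
		struct {                /* "HT-PHY" view of the same bytes */
			uint8_t pad;
			int8_t  ht_power0;
		} __attribute__((packed));
		struct {                /* "N-PHY" view of the same bytes */
			int8_t  power2;
			uint8_t pad2;
		} __attribute__((packed));
	};
} __attribute__((packed));

int main(void)
{
	const uint8_t wire[3] = { 0x40, 0x00, 0xec };   /* bytes off the air */
	struct demo_rxhdr hdr;

	memcpy(&hdr, wire, sizeof(hdr));    /* byte copy, no alignment games */
	/* The driver reads whichever view matches the current PHY type. */
	printf("HT view: ht_power0 = %d\n", hdr.ht_power0);
	printf("N  view: power2    = %d\n", hdr.power2);
	return 0;
}
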
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index b56a30297c26..6ebec8f42846 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -358,13 +358,14 @@ static uint nrxdactive(struct dma_info *di, uint h, uint t)
358 358
359static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) 359static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
360{ 360{
361 uint dmactrlflags = di->dma.dmactrlflags; 361 uint dmactrlflags;
362 362
363 if (di == NULL) { 363 if (di == NULL) {
364 DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name)); 364 DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
365 return 0; 365 return 0;
366 } 366 }
367 367
368 dmactrlflags = di->dma.dmactrlflags;
368 dmactrlflags &= ~mask; 369 dmactrlflags &= ~mask;
369 dmactrlflags |= flags; 370 dmactrlflags |= flags;
370 371
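
The dma.c hunk above fixes a use-before-check: di->dma.dmactrlflags was read in the initializer, and di->name was passed to the error print, before the di == NULL test could run. A minimal stand-alone sketch of the corrected shape, with invented types and names:

/* Sketch of the _dma_ctrlflags() fix: check the handle before touching it.
 * The struct and function names here are invented for the example. */
#include <stdio.h>

struct dma_info {
	unsigned int dmactrlflags;
};

static unsigned int ctrlflags_fixed(struct dma_info *di, unsigned int mask,
				    unsigned int flags)
{
	unsigned int v;

	if (di == NULL) {                       /* check first ... */
		fprintf(stderr, "ctrlflags: NULL dma handle\n");
		return 0;
	}
	v = di->dmactrlflags;                   /* ... dereference after */
	v &= ~mask;
	v |= flags;
	return v;
}

int main(void)
{
	struct dma_info di = { .dmactrlflags = 0xf0 };

	printf("valid handle: 0x%x\n", ctrlflags_fixed(&di, 0x30, 0x01));
	printf("NULL handle:  0x%x\n", ctrlflags_fixed(NULL, 0x30, 0x01));
	return 0;
}

The buggy shape initialized the local from di->dmactrlflags before the NULL test, so the test could never help.
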
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index e12b48c2cff6..dd008b0e6417 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -191,6 +191,7 @@ static struct iwl_base_params iwl1000_base_params = {
191 .chain_noise_scale = 1000, 191 .chain_noise_scale = 1000,
192 .wd_timeout = IWL_DEF_WD_TIMEOUT, 192 .wd_timeout = IWL_DEF_WD_TIMEOUT,
193 .max_event_log_size = 128, 193 .max_event_log_size = 128,
194 .wd_disable = true,
194}; 195};
195static struct iwl_ht_params iwl1000_ht_params = { 196static struct iwl_ht_params iwl1000_ht_params = {
196 .ht_greenfield_support = true, 197 .ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index c511c98a89a8..f55fb2d1af52 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -364,6 +364,7 @@ static struct iwl_base_params iwl5000_base_params = {
364 .wd_timeout = IWL_LONG_WD_TIMEOUT, 364 .wd_timeout = IWL_LONG_WD_TIMEOUT,
365 .max_event_log_size = 512, 365 .max_event_log_size = 512,
366 .no_idle_support = true, 366 .no_idle_support = true,
367 .wd_disable = true,
367}; 368};
368static struct iwl_ht_params iwl5000_ht_params = { 369static struct iwl_ht_params iwl5000_ht_params = {
369 .ht_greenfield_support = true, 370 .ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 58a381c01c89..a7a6def40d05 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -528,6 +528,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
528 return 0; 528 return 0;
529} 529}
530 530
531void iwlagn_config_ht40(struct ieee80211_conf *conf,
532 struct iwl_rxon_context *ctx)
533{
534 if (conf_is_ht40_minus(conf)) {
535 ctx->ht.extension_chan_offset =
536 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
537 ctx->ht.is_40mhz = true;
538 } else if (conf_is_ht40_plus(conf)) {
539 ctx->ht.extension_chan_offset =
540 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
541 ctx->ht.is_40mhz = true;
542 } else {
543 ctx->ht.extension_chan_offset =
544 IEEE80211_HT_PARAM_CHA_SEC_NONE;
545 ctx->ht.is_40mhz = false;
546 }
547}
548
531int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) 549int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
532{ 550{
533 struct iwl_priv *priv = hw->priv; 551 struct iwl_priv *priv = hw->priv;
@@ -586,19 +604,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
586 ctx->ht.enabled = conf_is_ht(conf); 604 ctx->ht.enabled = conf_is_ht(conf);
587 605
588 if (ctx->ht.enabled) { 606 if (ctx->ht.enabled) {
589 if (conf_is_ht40_minus(conf)) { 607 /* if HT40 is used, it should not change
590 ctx->ht.extension_chan_offset = 608 * after associated except channel switch */
591 IEEE80211_HT_PARAM_CHA_SEC_BELOW; 609 if (iwl_is_associated_ctx(ctx) &&
592 ctx->ht.is_40mhz = true; 610 !ctx->ht.is_40mhz)
593 } else if (conf_is_ht40_plus(conf)) { 611 iwlagn_config_ht40(conf, ctx);
594 ctx->ht.extension_chan_offset =
595 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
596 ctx->ht.is_40mhz = true;
597 } else {
598 ctx->ht.extension_chan_offset =
599 IEEE80211_HT_PARAM_CHA_SEC_NONE;
600 ctx->ht.is_40mhz = false;
601 }
602 } else 612 } else
603 ctx->ht.is_40mhz = false; 613 ctx->ht.is_40mhz = false;
604 614
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index ed6283623932..4b2aa1da0953 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -1268,9 +1268,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
1268 1268
1269 switch (keyconf->cipher) { 1269 switch (keyconf->cipher) {
1270 case WLAN_CIPHER_SUITE_TKIP: 1270 case WLAN_CIPHER_SUITE_TKIP:
1271 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1272 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1273
1274 if (sta) 1271 if (sta)
1275 addr = sta->addr; 1272 addr = sta->addr;
1276 else /* station mode case only */ 1273 else /* station mode case only */
@@ -1283,8 +1280,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
1283 seq.tkip.iv32, p1k, CMD_SYNC); 1280 seq.tkip.iv32, p1k, CMD_SYNC);
1284 break; 1281 break;
1285 case WLAN_CIPHER_SUITE_CCMP: 1282 case WLAN_CIPHER_SUITE_CCMP:
1286 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1287 /* fall through */
1288 case WLAN_CIPHER_SUITE_WEP40: 1283 case WLAN_CIPHER_SUITE_WEP40:
1289 case WLAN_CIPHER_SUITE_WEP104: 1284 case WLAN_CIPHER_SUITE_WEP104:
1290 ret = iwlagn_send_sta_key(priv, keyconf, sta_id, 1285 ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index ccba69b7f8a7..bacc06c95e7a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2316,6 +2316,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2316 return -EOPNOTSUPP; 2316 return -EOPNOTSUPP;
2317 } 2317 }
2318 2318
2319 switch (key->cipher) {
2320 case WLAN_CIPHER_SUITE_TKIP:
2321 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2322 /* fall through */
2323 case WLAN_CIPHER_SUITE_CCMP:
2324 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2325 break;
2326 default:
2327 break;
2328 }
2329
2319 /* 2330 /*
2320 * We could program these keys into the hardware as well, but we 2331 * We could program these keys into the hardware as well, but we
2321 * don't expect much multicast traffic in IBSS and having keys 2332 * don't expect much multicast traffic in IBSS and having keys
@@ -2599,21 +2610,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
2599 2610
2600 /* Configure HT40 channels */ 2611 /* Configure HT40 channels */
2601 ctx->ht.enabled = conf_is_ht(conf); 2612 ctx->ht.enabled = conf_is_ht(conf);
2602 if (ctx->ht.enabled) { 2613 if (ctx->ht.enabled)
2603 if (conf_is_ht40_minus(conf)) { 2614 iwlagn_config_ht40(conf, ctx);
2604 ctx->ht.extension_chan_offset = 2615 else
2605 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2606 ctx->ht.is_40mhz = true;
2607 } else if (conf_is_ht40_plus(conf)) {
2608 ctx->ht.extension_chan_offset =
2609 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2610 ctx->ht.is_40mhz = true;
2611 } else {
2612 ctx->ht.extension_chan_offset =
2613 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2614 ctx->ht.is_40mhz = false;
2615 }
2616 } else
2617 ctx->ht.is_40mhz = false; 2616 ctx->ht.is_40mhz = false;
2618 2617
2619 if ((le16_to_cpu(ctx->staging.channel) != ch)) 2618 if ((le16_to_cpu(ctx->staging.channel) != ch))
@@ -3499,9 +3498,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
3499module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO); 3498module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
3500MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])"); 3499MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
3501 3500
3502module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO); 3501module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
3503MODULE_PARM_DESC(wd_disable, 3502MODULE_PARM_DESC(wd_disable,
3504 "Disable stuck queue watchdog timer (default: 0 [enabled])"); 3503 "Disable stuck queue watchdog timer 0=system default, "
3504 "1=disable, 2=enable (default: 0)");
3505 3505
3506/* 3506/*
3507 * set bt_coex_active to true, uCode will do kill/defer 3507 * set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 5b936ec1a541..3856abaea507 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -86,6 +86,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
86 struct ieee80211_vif *vif, 86 struct ieee80211_vif *vif,
87 struct ieee80211_bss_conf *bss_conf, 87 struct ieee80211_bss_conf *bss_conf,
88 u32 changes); 88 u32 changes);
89void iwlagn_config_ht40(struct ieee80211_conf *conf,
90 struct iwl_rxon_context *ctx);
89 91
90/* uCode */ 92/* uCode */
91int iwlagn_rx_calib_result(struct iwl_priv *priv, 93int iwlagn_rx_calib_result(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 001fdf140abb..fcf54160e4ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1810,11 +1810,23 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
1810{ 1810{
1811 unsigned int timeout = priv->cfg->base_params->wd_timeout; 1811 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1812 1812
1813 if (timeout && !iwlagn_mod_params.wd_disable) 1813 if (!iwlagn_mod_params.wd_disable) {
1814 mod_timer(&priv->watchdog, 1814 /* use system default */
1815 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); 1815 if (timeout && !priv->cfg->base_params->wd_disable)
1816 else 1816 mod_timer(&priv->watchdog,
1817 del_timer(&priv->watchdog); 1817 jiffies +
1818 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1819 else
1820 del_timer(&priv->watchdog);
1821 } else {
1822 /* module parameter overwrites the default configuration */
1823 if (timeout && iwlagn_mod_params.wd_disable == 2)
1824 mod_timer(&priv->watchdog,
1825 jiffies +
1826 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1827 else
1828 del_timer(&priv->watchdog);
1829 }
1818} 1830}
1819 1831
1820/** 1832/**
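
With wd_disable widened from bool to int, iwl_setup_watchdog() above arms the stuck-queue timer either from the per-device default (parameter 0) or as forced by the module parameter (1 = off, 2 = on). A compact stand-alone sketch of that three-way decision; the function and parameter names are invented, only the logic is taken from the hunk.

/* Sketch of the wd_disable decision: 0 = follow the per-device default,
 * 1 = force the watchdog off, 2 = force it on. Names are invented. */
#include <stdbool.h>
#include <stdio.h>

static bool watchdog_should_run(unsigned int timeout_ms,
				bool cfg_wd_disable,  /* per-device default */
				int wd_disable_param) /* module parameter   */
{
	if (timeout_ms == 0)
		return false;                 /* no timeout configured */
	if (wd_disable_param == 0)
		return !cfg_wd_disable;       /* use the system default */
	return wd_disable_param == 2;         /* 1 = disable, 2 = enable */
}

int main(void)
{
	printf("param 0, cfg runs watchdog: %d\n", watchdog_should_run(2000, false, 0));
	printf("param 0, cfg disables it  : %d\n", watchdog_should_run(2000, true, 0));
	printf("param 1 (force off)       : %d\n", watchdog_should_run(2000, false, 1));
	printf("param 2 (force on)        : %d\n", watchdog_should_run(2000, true, 2));
	return 0;
}
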
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 137da3380704..f2fc288f3dd3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -113,6 +113,7 @@ struct iwl_lib_ops {
113 * @shadow_reg_enable: HW shadow register bit 113 * @shadow_reg_enable: HW shadow register bit
114 * @no_idle_support: do not support idle mode 114 * @no_idle_support: do not support idle mode
115 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up 115 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
116 * @wd_disable: disable watchdog timer
116 */ 117 */
117struct iwl_base_params { 118struct iwl_base_params {
118 int eeprom_size; 119 int eeprom_size;
@@ -134,6 +135,7 @@ struct iwl_base_params {
134 const bool shadow_reg_enable; 135 const bool shadow_reg_enable;
135 const bool no_idle_support; 136 const bool no_idle_support;
136 const bool hd_v2; 137 const bool hd_v2;
138 const bool wd_disable;
137}; 139};
138/* 140/*
139 * @advanced_bt_coexist: support advanced bt coexist 141 * @advanced_bt_coexist: support advanced bt coexist
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index 1f7a93c67c45..14eaf37ce3b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -120,7 +120,7 @@ extern struct iwl_mod_params iwlagn_mod_params;
120 * @restart_fw: restart firmware, default = 1 120 * @restart_fw: restart firmware, default = 1
121 * @plcp_check: enable plcp health check, default = true 121 * @plcp_check: enable plcp health check, default = true
122 * @ack_check: disable ack health check, default = false 122 * @ack_check: disable ack health check, default = false
123 * @wd_disable: enable stuck queue check, default = false 123 * @wd_disable: enable stuck queue check, default = 0
124 * @bt_coex_active: enable bt coex, default = true 124 * @bt_coex_active: enable bt coex, default = true
125 * @led_mode: system default, default = 0 125 * @led_mode: system default, default = 0
126 * @no_sleep_autoadjust: disable autoadjust, default = true 126 * @no_sleep_autoadjust: disable autoadjust, default = true
@@ -141,7 +141,7 @@ struct iwl_mod_params {
141 int restart_fw; 141 int restart_fw;
142 bool plcp_check; 142 bool plcp_check;
143 bool ack_check; 143 bool ack_check;
144 bool wd_disable; 144 int wd_disable;
145 bool bt_coex_active; 145 bool bt_coex_active;
146 int led_mode; 146 int led_mode;
147 bool no_sleep_autoadjust; 147 bool no_sleep_autoadjust;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index da3411057afc..ce918980e977 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -990,29 +990,16 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
990 return 0; 990 return 0;
991} 991}
992 992
993static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans) 993static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
994{ 994{
995 unsigned long flags; 995 unsigned long flags;
996 struct iwl_trans_pcie *trans_pcie = 996 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
997 IWL_TRANS_GET_PCIE_TRANS(trans);
998 997
998 /* tell the device to stop sending interrupts */
999 spin_lock_irqsave(&trans->shrd->lock, flags); 999 spin_lock_irqsave(&trans->shrd->lock, flags);
1000 iwl_disable_interrupts(trans); 1000 iwl_disable_interrupts(trans);
1001 spin_unlock_irqrestore(&trans->shrd->lock, flags); 1001 spin_unlock_irqrestore(&trans->shrd->lock, flags);
1002 1002
1003 /* wait to make sure we flush pending tasklet*/
1004 synchronize_irq(bus(trans)->irq);
1005 tasklet_kill(&trans_pcie->irq_tasklet);
1006}
1007
1008static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1009{
1010 /* stop and reset the on-board processor */
1011 iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
1012
1013 /* tell the device to stop sending interrupts */
1014 iwl_trans_pcie_disable_sync_irq(trans);
1015
1016 /* device going down, Stop using ICT table */ 1003 /* device going down, Stop using ICT table */
1017 iwl_disable_ict(trans); 1004 iwl_disable_ict(trans);
1018 1005
@@ -1039,6 +1026,20 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1039 1026
1040 /* Stop the device, and put it in low power state */ 1027 /* Stop the device, and put it in low power state */
1041 iwl_apm_stop(priv(trans)); 1028 iwl_apm_stop(priv(trans));
1029
1030 /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1031 * Clean again the interrupt here
1032 */
1033 spin_lock_irqsave(&trans->shrd->lock, flags);
1034 iwl_disable_interrupts(trans);
1035 spin_unlock_irqrestore(&trans->shrd->lock, flags);
1036
1037 /* wait to make sure we flush pending tasklet*/
1038 synchronize_irq(bus(trans)->irq);
1039 tasklet_kill(&trans_pcie->irq_tasklet);
1040
1041 /* stop and reset the on-board processor */
1042 iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
1042} 1043}
1043 1044
1044static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 1045static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 4fcd653bddc4..a7f1ab28940d 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -634,7 +634,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
634 if (channel && 634 if (channel &&
635 !(channel->flags & IEEE80211_CHAN_DISABLED)) 635 !(channel->flags & IEEE80211_CHAN_DISABLED))
636 cfg80211_inform_bss(wiphy, channel, 636 cfg80211_inform_bss(wiphy, channel,
637 bssid, le64_to_cpu(*(__le64 *)tsfdesc), 637 bssid, get_unaligned_le64(tsfdesc),
638 capa, intvl, ie, ielen, 638 capa, intvl, ie, ielen,
639 LBS_SCAN_RSSI_TO_MBM(rssi), 639 LBS_SCAN_RSSI_TO_MBM(rssi),
640 GFP_KERNEL); 640 GFP_KERNEL);
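
The libertas change above replaces a direct *(__le64 *) load with get_unaligned_le64(), since the TSF field inside a scan response is not guaranteed to sit on an 8-byte boundary. The sketch below is a generic, portable reimplementation of an unaligned little-endian 64-bit read for illustration; it is not the kernel helper itself.

/* Sketch: read a 64-bit little-endian value from an arbitrarily aligned
 * buffer position, byte by byte. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint64_t get_unaligned_le64_demo(const void *p)
{
	uint8_t b[8];
	uint64_t v = 0;
	int i;

	memcpy(b, p, sizeof(b));          /* byte copy, no alignment assumed */
	for (i = 7; i >= 0; i--)
		v = (v << 8) | b[i];      /* assemble in little-endian order */
	return v;
}

int main(void)
{
	/* A TSF-like field starting at an odd, unaligned offset. */
	const uint8_t frame[9] = {
		0xaa, 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11
	};

	printf("tsf = 0x%016llx\n",
	       (unsigned long long)get_unaligned_le64_demo(&frame[1]));
	return 0;
}
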
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 11b69b300dc0..728baa445259 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -995,6 +995,7 @@ static int if_spi_host_to_card(struct lbs_private *priv,
995 spin_unlock_irqrestore(&card->buffer_lock, flags); 995 spin_unlock_irqrestore(&card->buffer_lock, flags);
996 break; 996 break;
997 default: 997 default:
998 kfree(packet);
998 netdev_err(priv->dev, "can't transfer buffer of type %d\n", 999 netdev_err(priv->dev, "can't transfer buffer of type %d\n",
999 type); 1000 type);
1000 err = -EINVAL; 1001 err = -EINVAL;
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index dae8dbb24a03..8d3ab378662b 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -819,8 +819,10 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
819 wildcard_ssid_tlv->header.len = cpu_to_le16( 819 wildcard_ssid_tlv->header.len = cpu_to_le16(
820 (u16) (ssid_len + sizeof(wildcard_ssid_tlv-> 820 (u16) (ssid_len + sizeof(wildcard_ssid_tlv->
821 max_ssid_length))); 821 max_ssid_length)));
822 wildcard_ssid_tlv->max_ssid_length = 822
823 user_scan_in->ssid_list[ssid_idx].max_len; 823 /* max_ssid_length = 0 tells firmware to perform
824 specific scan for the SSID filled */
825 wildcard_ssid_tlv->max_ssid_length = 0;
824 826
825 memcpy(wildcard_ssid_tlv->ssid, 827 memcpy(wildcard_ssid_tlv->ssid,
826 user_scan_in->ssid_list[ssid_idx].ssid, 828 user_scan_in->ssid_list[ssid_idx].ssid,
@@ -1469,7 +1471,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
1469 s32 rssi, const u8 *ie_buf, size_t ie_len, 1471 s32 rssi, const u8 *ie_buf, size_t ie_len,
1470 u16 beacon_period, u16 cap_info_bitmap, u8 band) 1472 u16 beacon_period, u16 cap_info_bitmap, u8 band)
1471{ 1473{
1472 struct mwifiex_bssdescriptor *bss_desc = NULL; 1474 struct mwifiex_bssdescriptor *bss_desc;
1473 int ret; 1475 int ret;
1474 unsigned long flags; 1476 unsigned long flags;
1475 u8 *beacon_ie; 1477 u8 *beacon_ie;
@@ -1484,6 +1486,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
1484 1486
1485 beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL); 1487 beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL);
1486 if (!beacon_ie) { 1488 if (!beacon_ie) {
1489 kfree(bss_desc);
1487 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n"); 1490 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
1488 return -ENOMEM; 1491 return -ENOMEM;
1489 } 1492 }
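
Both mwifiex hunks above plug leaks on error paths: if_spi.c frees the packet it cannot transfer, and scan.c frees bss_desc when the beacon_ie duplication fails. A minimal stand-alone sketch of that cleanup-on-error shape, with invented names and plain malloc/free standing in for the kernel allocators:

/* Sketch: every early return taken after an allocation must release what
 * was already allocated. Names and sizes are invented for the example. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int update_bss(const unsigned char *ie_buf, size_t ie_len)
{
	unsigned char *bss_desc, *beacon_ie;

	bss_desc = calloc(1, 64);
	if (!bss_desc)
		return -1;

	beacon_ie = malloc(ie_len);
	if (!beacon_ie) {
		free(bss_desc);         /* the fix: don't leak bss_desc */
		return -1;
	}
	memcpy(beacon_ie, ie_buf, ie_len);

	/* ... use bss_desc and beacon_ie ... */

	free(beacon_ie);
	free(bss_desc);
	return 0;
}

int main(void)
{
	const unsigned char ie[] = { 0x00, 0x04, 't', 'e', 's', 't' };

	printf("update_bss: %d\n", update_bss(ie, sizeof(ie)));
	return 0;
}
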
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index f18df82eeb92..78d0d6988553 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -588,8 +588,6 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
588 588
589 WARN_ON(priv->fw_state != FW_STATE_READY); 589 WARN_ON(priv->fw_state != FW_STATE_READY);
590 590
591 cancel_work_sync(&priv->work);
592
593 p54spi_power_off(priv); 591 p54spi_power_off(priv);
594 spin_lock_irqsave(&priv->tx_lock, flags); 592 spin_lock_irqsave(&priv->tx_lock, flags);
595 INIT_LIST_HEAD(&priv->tx_pending); 593 INIT_LIST_HEAD(&priv->tx_pending);
@@ -597,6 +595,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
597 595
598 priv->fw_state = FW_STATE_OFF; 596 priv->fw_state = FW_STATE_OFF;
599 mutex_unlock(&priv->mutex); 597 mutex_unlock(&priv->mutex);
598
599 cancel_work_sync(&priv->work);
600} 600}
601 601
602static int __devinit p54spi_probe(struct spi_device *spi) 602static int __devinit p54spi_probe(struct spi_device *spi)
@@ -656,6 +656,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
656 init_completion(&priv->fw_comp); 656 init_completion(&priv->fw_comp);
657 INIT_LIST_HEAD(&priv->tx_pending); 657 INIT_LIST_HEAD(&priv->tx_pending);
658 mutex_init(&priv->mutex); 658 mutex_init(&priv->mutex);
659 spin_lock_init(&priv->tx_lock);
659 SET_IEEE80211_DEV(hw, &spi->dev); 660 SET_IEEE80211_DEV(hw, &spi->dev);
660 priv->common.open = p54spi_op_start; 661 priv->common.open = p54spi_op_start;
661 priv->common.stop = p54spi_op_stop; 662 priv->common.stop = p54spi_op_stop;
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index d97a2caf582b..bc2ba80c47bb 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
778 dwrq->flags = 0; 778 dwrq->flags = 0;
779 dwrq->length = 0; 779 dwrq->length = 0;
780 } 780 }
781 essid->octets[essid->length] = '\0'; 781 essid->octets[dwrq->length] = '\0';
782 memcpy(extra, essid->octets, dwrq->length); 782 memcpy(extra, essid->octets, dwrq->length);
783 kfree(essid); 783 kfree(essid);
784 784
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 3f183a15186e..1ba079dffb11 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3771,7 +3771,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
3771 /* Apparently the data is read from end to start */ 3771 /* Apparently the data is read from end to start */
3772 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg); 3772 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
3773 /* The returned value is in CPU order, but eeprom is le */ 3773 /* The returned value is in CPU order, but eeprom is le */
3774 rt2x00dev->eeprom[i] = cpu_to_le32(reg); 3774 *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
3775 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg); 3775 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
3776 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg); 3776 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
3777 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg); 3777 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index f1565792f270..377876315b8d 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -919,6 +919,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
919 { USB_DEVICE(0x050d, 0x935b) }, 919 { USB_DEVICE(0x050d, 0x935b) },
920 /* Buffalo */ 920 /* Buffalo */
921 { USB_DEVICE(0x0411, 0x00e8) }, 921 { USB_DEVICE(0x0411, 0x00e8) },
922 { USB_DEVICE(0x0411, 0x0158) },
922 { USB_DEVICE(0x0411, 0x016f) }, 923 { USB_DEVICE(0x0411, 0x016f) },
923 { USB_DEVICE(0x0411, 0x01a2) }, 924 { USB_DEVICE(0x0411, 0x01a2) },
924 /* Corega */ 925 /* Corega */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 2ec5c00235e6..99ff12d0c29d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -943,6 +943,7 @@ struct rt2x00_dev {
943 * Powersaving work 943 * Powersaving work
944 */ 944 */
945 struct delayed_work autowakeup_work; 945 struct delayed_work autowakeup_work;
946 struct work_struct sleep_work;
946 947
947 /* 948 /*
948 * Data queue arrays for RX, TX, Beacon and ATIM. 949 * Data queue arrays for RX, TX, Beacon and ATIM.
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index e1fb2a8569be..edd317fa7c0a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -465,6 +465,23 @@ static u8 *rt2x00lib_find_ie(u8 *data, unsigned int len, u8 ie)
465 return NULL; 465 return NULL;
466} 466}
467 467
468static void rt2x00lib_sleep(struct work_struct *work)
469{
470 struct rt2x00_dev *rt2x00dev =
471 container_of(work, struct rt2x00_dev, sleep_work);
472
473 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
474 return;
475
476 /*
477 * Check again if powersaving is enabled, to prevent races from delayed
478 * work execution.
479 */
480 if (!test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags))
481 rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf,
482 IEEE80211_CONF_CHANGE_PS);
483}
484
468static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev, 485static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
469 struct sk_buff *skb, 486 struct sk_buff *skb,
470 struct rxdone_entry_desc *rxdesc) 487 struct rxdone_entry_desc *rxdesc)
@@ -512,8 +529,7 @@ static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
512 cam |= (tim_ie->bitmap_ctrl & 0x01); 529 cam |= (tim_ie->bitmap_ctrl & 0x01);
513 530
514 if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags)) 531 if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags))
515 rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf, 532 queue_work(rt2x00dev->workqueue, &rt2x00dev->sleep_work);
516 IEEE80211_CONF_CHANGE_PS);
517} 533}
518 534
519static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev, 535static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
@@ -1141,6 +1157,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1141 1157
1142 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); 1158 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
1143 INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup); 1159 INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup);
1160 INIT_WORK(&rt2x00dev->sleep_work, rt2x00lib_sleep);
1144 1161
1145 /* 1162 /*
1146 * Let the driver probe the device to detect the capabilities. 1163 * Let the driver probe the device to detect the capabilities.
@@ -1197,6 +1214,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1197 */ 1214 */
1198 cancel_work_sync(&rt2x00dev->intf_work); 1215 cancel_work_sync(&rt2x00dev->intf_work);
1199 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); 1216 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
1217 cancel_work_sync(&rt2x00dev->sleep_work);
1200 if (rt2x00_is_usb(rt2x00dev)) { 1218 if (rt2x00_is_usb(rt2x00dev)) {
1201 del_timer_sync(&rt2x00dev->txstatus_timer); 1219 del_timer_sync(&rt2x00dev->txstatus_timer);
1202 cancel_work_sync(&rt2x00dev->rxdone_work); 1220 cancel_work_sync(&rt2x00dev->rxdone_work);
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index db5262844543..55c8e50f45fd 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -395,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
395 if (mac->link_state != MAC80211_LINKED) 395 if (mac->link_state != MAC80211_LINKED)
396 return; 396 return;
397 397
398 spin_lock(&rtlpriv->locks.lps_lock); 398 spin_lock_irq(&rtlpriv->locks.lps_lock);
399 399
400 /* Idle for a while if we connect to AP a while ago. */ 400 /* Idle for a while if we connect to AP a while ago. */
401 if (mac->cnt_after_linked >= 2) { 401 if (mac->cnt_after_linked >= 2) {
@@ -407,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
407 } 407 }
408 } 408 }
409 409
410 spin_unlock(&rtlpriv->locks.lps_lock); 410 spin_unlock_irq(&rtlpriv->locks.lps_lock);
411} 411}
412 412
413/*Leave the leisure power save mode.*/ 413/*Leave the leisure power save mode.*/
@@ -416,8 +416,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
416 struct rtl_priv *rtlpriv = rtl_priv(hw); 416 struct rtl_priv *rtlpriv = rtl_priv(hw);
417 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 417 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
418 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 418 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
419 unsigned long flags;
419 420
420 spin_lock(&rtlpriv->locks.lps_lock); 421 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
421 422
422 if (ppsc->fwctrl_lps) { 423 if (ppsc->fwctrl_lps) {
423 if (ppsc->dot11_psmode != EACTIVE) { 424 if (ppsc->dot11_psmode != EACTIVE) {
@@ -438,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
438 rtl_lps_set_psmode(hw, EACTIVE); 439 rtl_lps_set_psmode(hw, EACTIVE);
439 } 440 }
440 } 441 }
441 spin_unlock(&rtlpriv->locks.lps_lock); 442 spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
442} 443}
443 444
444/* For sw LPS*/ 445/* For sw LPS*/
@@ -539,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
539 RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); 540 RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
540 } 541 }
541 542
542 spin_lock(&rtlpriv->locks.lps_lock); 543 spin_lock_irq(&rtlpriv->locks.lps_lock);
543 rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS); 544 rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
544 spin_unlock(&rtlpriv->locks.lps_lock); 545 spin_unlock_irq(&rtlpriv->locks.lps_lock);
545} 546}
546 547
547void rtl_swlps_rfon_wq_callback(void *data) 548void rtl_swlps_rfon_wq_callback(void *data)
@@ -574,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
574 if (rtlpriv->link_info.busytraffic) 575 if (rtlpriv->link_info.busytraffic)
575 return; 576 return;
576 577
577 spin_lock(&rtlpriv->locks.lps_lock); 578 spin_lock_irq(&rtlpriv->locks.lps_lock);
578 rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS); 579 rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
579 spin_unlock(&rtlpriv->locks.lps_lock); 580 spin_unlock_irq(&rtlpriv->locks.lps_lock);
580 581
581 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && 582 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
582 !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { 583 !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 592a10ac5929..3b585aadabfc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
569 } 569 }
570 case ERFSLEEP:{ 570 case ERFSLEEP:{
571 if (ppsc->rfpwr_state == ERFOFF) 571 if (ppsc->rfpwr_state == ERFOFF)
572 break; 572 return false;
573 for (queue_id = 0, i = 0; 573 for (queue_id = 0, i = 0;
574 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { 574 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
575 ring = &pcipriv->dev.tx_ring[queue_id]; 575 ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 72852900df84..e49cf2244c75 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
548 break; 548 break;
549 case ERFSLEEP: 549 case ERFSLEEP:
550 if (ppsc->rfpwr_state == ERFOFF) 550 if (ppsc->rfpwr_state == ERFOFF)
551 break; 551 return false;
552 for (queue_id = 0, i = 0; 552 for (queue_id = 0, i = 0;
553 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { 553 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
554 ring = &pcipriv->dev.tx_ring[queue_id]; 554 ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 3ac7af1c5509..0883349e1c83 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
3374 break; 3374 break;
3375 case ERFSLEEP: 3375 case ERFSLEEP:
3376 if (ppsc->rfpwr_state == ERFOFF) 3376 if (ppsc->rfpwr_state == ERFOFF)
3377 break; 3377 return false;
3378 3378
3379 for (queue_id = 0, i = 0; 3379 for (queue_id = 0, i = 0;
3380 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { 3380 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index f27171af979c..f10ac1ad9087 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
602 } 602 }
603 case ERFSLEEP: 603 case ERFSLEEP:
604 if (ppsc->rfpwr_state == ERFOFF) 604 if (ppsc->rfpwr_state == ERFOFF)
605 break; 605 return false;
606 606
607 for (queue_id = 0, i = 0; 607 for (queue_id = 0, i = 0;
608 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { 608 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index 128ccb79318c..fc29c671cf3b 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -559,7 +559,7 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
559 break; 559 break;
560 } 560 }
561 /* Fail if SSID isn't present in the filters */ 561 /* Fail if SSID isn't present in the filters */
562 if (j == req->n_ssids) { 562 if (j == cmd->n_ssids) {
563 ret = -EINVAL; 563 ret = -EINVAL;
564 goto out_free; 564 goto out_free;
565 } 565 }
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0cb594c86090..15e332d08c8d 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1021,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1021 pending_idx = *((u16 *)skb->data); 1021 pending_idx = *((u16 *)skb->data);
1022 xen_netbk_idx_release(netbk, pending_idx); 1022 xen_netbk_idx_release(netbk, pending_idx);
1023 for (j = start; j < i; j++) { 1023 for (j = start; j < i; j++) {
1024 pending_idx = frag_get_pending_idx(&shinfo->frags[i]); 1024 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1025 xen_netbk_idx_release(netbk, pending_idx); 1025 xen_netbk_idx_release(netbk, pending_idx);
1026 } 1026 }
1027 1027
@@ -1668,7 +1668,7 @@ static int __init netback_init(void)
1668 "netback/%u", group); 1668 "netback/%u", group);
1669 1669
1670 if (IS_ERR(netbk->task)) { 1670 if (IS_ERR(netbk->task)) {
1671 printk(KERN_ALERT "kthread_run() fails at netback\n"); 1671 printk(KERN_ALERT "kthread_create() fails at netback\n");
1672 del_timer(&netbk->net_timer); 1672 del_timer(&netbk->net_timer);
1673 rc = PTR_ERR(netbk->task); 1673 rc = PTR_ERR(netbk->task);
1674 goto failed_init; 1674 goto failed_init;
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6d3dd3988d0f..0f0cfa3bca30 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -26,11 +26,6 @@
26#include <linux/string.h> 26#include <linux/string.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
30#ifndef NO_IRQ
31#define NO_IRQ 0
32#endif
33
34/** 29/**
35 * irq_of_parse_and_map - Parse and map an interrupt into linux virq space 30 * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
36 * @device: Device node of the device whose interrupt is to be mapped 31 * @device: Device node of the device whose interrupt is to be mapped
@@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
44 struct of_irq oirq; 39 struct of_irq oirq;
45 40
46 if (of_irq_map_one(dev, index, &oirq)) 41 if (of_irq_map_one(dev, index, &oirq))
47 return NO_IRQ; 42 return 0;
48 43
49 return irq_create_of_mapping(oirq.controller, oirq.specifier, 44 return irq_create_of_mapping(oirq.controller, oirq.specifier,
50 oirq.size); 45 oirq.size);
@@ -60,27 +55,27 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
60 */ 55 */
61struct device_node *of_irq_find_parent(struct device_node *child) 56struct device_node *of_irq_find_parent(struct device_node *child)
62{ 57{
63 struct device_node *p, *c = child; 58 struct device_node *p;
64 const __be32 *parp; 59 const __be32 *parp;
65 60
66 if (!of_node_get(c)) 61 if (!of_node_get(child))
67 return NULL; 62 return NULL;
68 63
69 do { 64 do {
70 parp = of_get_property(c, "interrupt-parent", NULL); 65 parp = of_get_property(child, "interrupt-parent", NULL);
71 if (parp == NULL) 66 if (parp == NULL)
72 p = of_get_parent(c); 67 p = of_get_parent(child);
73 else { 68 else {
74 if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) 69 if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
75 p = of_node_get(of_irq_dflt_pic); 70 p = of_node_get(of_irq_dflt_pic);
76 else 71 else
77 p = of_find_node_by_phandle(be32_to_cpup(parp)); 72 p = of_find_node_by_phandle(be32_to_cpup(parp));
78 } 73 }
79 of_node_put(c); 74 of_node_put(child);
80 c = p; 75 child = p;
81 } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL); 76 } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL);
82 77
83 return (p == child) ? NULL : p; 78 return p;
84} 79}
85 80
86/** 81/**
@@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
345 340
346 /* Only dereference the resource if both the 341 /* Only dereference the resource if both the
347 * resource and the irq are valid. */ 342 * resource and the irq are valid. */
348 if (r && irq != NO_IRQ) { 343 if (r && irq) {
349 r->start = r->end = irq; 344 r->start = r->end = irq;
350 r->flags = IORESOURCE_IRQ; 345 r->flags = IORESOURCE_IRQ;
351 r->name = dev->full_name; 346 r->name = dev->full_name;
@@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev)
363{ 358{
364 int nr = 0; 359 int nr = 0;
365 360
366 while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ) 361 while (of_irq_to_resource(dev, nr, NULL))
367 nr++; 362 nr++;
368 363
369 return nr; 364 return nr;
@@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
383 int i; 378 int i;
384 379
385 for (i = 0; i < nr_irqs; i++, res++) 380 for (i = 0; i < nr_irqs; i++, res++)
386 if (of_irq_to_resource(dev, i, res) == NO_IRQ) 381 if (!of_irq_to_resource(dev, i, res))
387 break; 382 break;
388 383
389 return i; 384 return i;
@@ -424,6 +419,8 @@ void __init of_irq_init(const struct of_device_id *matches)
424 419
425 desc->dev = np; 420 desc->dev = np;
426 desc->interrupt_parent = of_irq_find_parent(np); 421 desc->interrupt_parent = of_irq_find_parent(np);
422 if (desc->interrupt_parent == np)
423 desc->interrupt_parent = NULL;
427 list_add_tail(&desc->list, &intc_desc_list); 424 list_add_tail(&desc->list, &intc_desc_list);
428 } 425 }
429 426
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index dccd8636095c..f8c752e408a6 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
239 return err; 239 return err;
240} 240}
241 241
242static int timer_mode;
243
242static int __init oprofile_init(void) 244static int __init oprofile_init(void)
243{ 245{
244 int err; 246 int err;
245 247
248 /* always init architecture to setup backtrace support */
246 err = oprofile_arch_init(&oprofile_ops); 249 err = oprofile_arch_init(&oprofile_ops);
247 if (err < 0 || timer) { 250
248 printk(KERN_INFO "oprofile: using timer interrupt.\n"); 251 timer_mode = err || timer; /* fall back to timer mode on errors */
252 if (timer_mode) {
253 if (!err)
254 oprofile_arch_exit();
249 err = oprofile_timer_init(&oprofile_ops); 255 err = oprofile_timer_init(&oprofile_ops);
250 if (err) 256 if (err)
251 return err; 257 return err;
252 } 258 }
253 return oprofilefs_register(); 259
260 err = oprofilefs_register();
261 if (!err)
262 return 0;
263
264 /* failed */
265 if (timer_mode)
266 oprofile_timer_exit();
267 else
268 oprofile_arch_exit();
269
270 return err;
254} 271}
255 272
256 273
257static void __exit oprofile_exit(void) 274static void __exit oprofile_exit(void)
258{ 275{
259 oprofile_timer_exit();
260 oprofilefs_unregister(); 276 oprofilefs_unregister();
261 oprofile_arch_exit(); 277 if (timer_mode)
278 oprofile_timer_exit();
279 else
280 oprofile_arch_exit();
262} 281}
263 282
264 283
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 3ef44624f510..878fba126582 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops)
110 ops->start = oprofile_hrtimer_start; 110 ops->start = oprofile_hrtimer_start;
111 ops->stop = oprofile_hrtimer_stop; 111 ops->stop = oprofile_hrtimer_stop;
112 ops->cpu_type = "timer"; 112 ops->cpu_type = "timer";
113 printk(KERN_INFO "oprofile: using timer interrupt.\n");
113 return 0; 114 return 0;
114} 115}
115 116
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index b6f9749b4fa7..f02b5235056d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -76,6 +76,7 @@ config PCI_IOV
76 76
77config PCI_PRI 77config PCI_PRI
78 bool "PCI PRI support" 78 bool "PCI PRI support"
79 depends on PCI
79 select PCI_ATS 80 select PCI_ATS
80 help 81 help
81 PRI is the PCI Page Request Interface. It allows PCI devices that are 82 PRI is the PCI Page Request Interface. It allows PCI devices that are
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 596172b4ae95..fce1c54a0c8d 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -459,8 +459,17 @@ static int add_bridge(acpi_handle handle)
459{ 459{
460 acpi_status status; 460 acpi_status status;
461 unsigned long long tmp; 461 unsigned long long tmp;
462 struct acpi_pci_root *root;
462 acpi_handle dummy_handle; 463 acpi_handle dummy_handle;
463 464
465 /*
466 * We shouldn't use this bridge if PCIe native hotplug control has been
467 * granted by the BIOS for it.
468 */
469 root = acpi_pci_find_root(handle);
470 if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
471 return -ENODEV;
472
464 /* if the bridge doesn't have _STA, we assume it is always there */ 473 /* if the bridge doesn't have _STA, we assume it is always there */
465 status = acpi_get_handle(handle, "_STA", &dummy_handle); 474 status = acpi_get_handle(handle, "_STA", &dummy_handle);
466 if (ACPI_SUCCESS(status)) { 475 if (ACPI_SUCCESS(status)) {
@@ -1376,13 +1385,23 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
1376static acpi_status 1385static acpi_status
1377find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) 1386find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
1378{ 1387{
1388 struct acpi_pci_root *root;
1379 int *count = (int *)context; 1389 int *count = (int *)context;
1380 1390
1381 if (acpi_is_root_bridge(handle)) { 1391 if (!acpi_is_root_bridge(handle))
1382 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, 1392 return AE_OK;
1383 handle_hotplug_event_bridge, NULL); 1393
1384 (*count)++; 1394 root = acpi_pci_find_root(handle);
1385 } 1395 if (!root)
1396 return AE_OK;
1397
1398 if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
1399 return AE_OK;
1400
1401 (*count)++;
1402 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
1403 handle_hotplug_event_bridge, NULL);
1404
1386 return AE_OK ; 1405 return AE_OK ;
1387} 1406}
1388 1407
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 1e9c9aacc3a6..085dbb5fc168 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -213,9 +213,6 @@ static int board_added(struct slot *p_slot)
213 goto err_exit; 213 goto err_exit;
214 } 214 }
215 215
216 /* Wait for 1 second after checking link training status */
217 msleep(1000);
218
219 /* Check for a power fault */ 216 /* Check for a power fault */
220 if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) { 217 if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
221 ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot)); 218 ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 96dc4734e4af..7b1414810ae3 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -280,6 +280,14 @@ int pciehp_check_link_status(struct controller *ctrl)
280 else 280 else
281 msleep(1000); 281 msleep(1000);
282 282
283 /*
284 * Need to wait for 1000 ms after Data Link Layer Link Active
285 * (DLLLA) bit reads 1b before sending configuration request.
286 * We need it before checking Link Training (LT) bit because
287 * LT is still set even after DLLLA bit is set on some platforms.
288 */
289 msleep(1000);
290
283 retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); 291 retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
284 if (retval) { 292 if (retval) {
285 ctrl_err(ctrl, "Cannot read LNKSTATUS register\n"); 293 ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
@@ -294,6 +302,16 @@ int pciehp_check_link_status(struct controller *ctrl)
294 return retval; 302 return retval;
295 } 303 }
296 304
305 /*
306 * If the port supports Link speeds greater than 5.0 GT/s, we
307 * must wait for 100 ms after Link training completes before
308 * sending configuration request.
309 */
310 if (ctrl->pcie->port->subordinate->max_bus_speed > PCIE_SPEED_5_0GT)
311 msleep(100);
312
313 pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
314
297 return retval; 315 return retval;
298} 316}
299 317
@@ -484,7 +502,6 @@ int pciehp_power_on_slot(struct slot * slot)
484 u16 slot_cmd; 502 u16 slot_cmd;
485 u16 cmd_mask; 503 u16 cmd_mask;
486 u16 slot_status; 504 u16 slot_status;
487 u16 lnk_status;
488 int retval = 0; 505 int retval = 0;
489 506
490 /* Clear sticky power-fault bit from previous power failures */ 507 /* Clear sticky power-fault bit from previous power failures */
@@ -516,14 +533,6 @@ int pciehp_power_on_slot(struct slot * slot)
516 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 533 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
517 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 534 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
518 535
519 retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
520 if (retval) {
521 ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n",
522 __func__);
523 return retval;
524 }
525 pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
526
527 return retval; 536 return retval;
528} 537}
529 538
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index aca972bbfb4c..dd7e0c51a33e 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -278,8 +278,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
278 278
279static int is_shpc_capable(struct pci_dev *dev) 279static int is_shpc_capable(struct pci_dev *dev)
280{ 280{
281 if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == 281 if (dev->vendor == PCI_VENDOR_ID_AMD &&
282 PCI_DEVICE_ID_AMD_GOLAM_7450)) 282 dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
283 return 1; 283 return 1;
284 if (!pci_find_capability(dev, PCI_CAP_ID_SHPC)) 284 if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
285 return 0; 285 return 0;
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 36547f0ce305..75ba2311b54f 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -944,8 +944,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
944 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */ 944 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
945 ctrl_dbg(ctrl, "Hotplug Controller:\n"); 945 ctrl_dbg(ctrl, "Hotplug Controller:\n");
946 946
947 if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device == 947 if (pdev->vendor == PCI_VENDOR_ID_AMD &&
948 PCI_DEVICE_ID_AMD_GOLAM_7450)) { 948 pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) {
949 /* amd shpc driver doesn't use Base Offset; assume 0 */ 949 /* amd shpc driver doesn't use Base Offset; assume 0 */
950 ctrl->mmio_base = pci_resource_start(pdev, 0); 950 ctrl->mmio_base = pci_resource_start(pdev, 0);
951 ctrl->mmio_size = pci_resource_len(pdev, 0); 951 ctrl->mmio_size = pci_resource_len(pdev, 0);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index ef566443f945..e17e2f8001d2 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -2,23 +2,17 @@
2# PINCTRL infrastructure and drivers 2# PINCTRL infrastructure and drivers
3# 3#
4 4
5menuconfig PINCTRL 5config PINCTRL
6 bool "PINCTRL Support" 6 bool
7 depends on EXPERIMENTAL 7 depends on EXPERIMENTAL
8 help
9 This enables the PINCTRL subsystem for controlling pins
10 on chip packages, for example multiplexing pins on primarily
11 PGA and BGA packages for systems on chip.
12
13 If unsure, say N.
14 8
15if PINCTRL 9if PINCTRL
16 10
11menu "Pin controllers"
12 depends on PINCTRL
13
17config PINMUX 14config PINMUX
18 bool "Support pinmux controllers" 15 bool "Support pinmux controllers"
19 help
20 Say Y here if you want the pincontrol subsystem to handle pin
21 multiplexing drivers.
22 16
23config DEBUG_PINCTRL 17config DEBUG_PINCTRL
24 bool "Debug PINCTRL calls" 18 bool "Debug PINCTRL calls"
@@ -30,14 +24,12 @@ config PINMUX_SIRF
30 bool "CSR SiRFprimaII pinmux driver" 24 bool "CSR SiRFprimaII pinmux driver"
31 depends on ARCH_PRIMA2 25 depends on ARCH_PRIMA2
32 select PINMUX 26 select PINMUX
33 help
34 Say Y here to enable the SiRFprimaII pinmux driver
35 27
36config PINMUX_U300 28config PINMUX_U300
37 bool "U300 pinmux driver" 29 bool "U300 pinmux driver"
38 depends on ARCH_U300 30 depends on ARCH_U300
39 select PINMUX 31 select PINMUX
40 help 32
41 Say Y here to enable the U300 pinmux driver 33endmenu
42 34
43endif 35endif
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index f4e3d82379d7..7f43cf86d776 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -83,8 +83,10 @@ config DELL_LAPTOP
83 depends on EXPERIMENTAL 83 depends on EXPERIMENTAL
84 depends on BACKLIGHT_CLASS_DEVICE 84 depends on BACKLIGHT_CLASS_DEVICE
85 depends on RFKILL || RFKILL = n 85 depends on RFKILL || RFKILL = n
86 depends on POWER_SUPPLY
87 depends on SERIO_I8042 86 depends on SERIO_I8042
87 select POWER_SUPPLY
88 select LEDS_CLASS
89 select NEW_LEDS
88 default n 90 default n
89 ---help--- 91 ---help---
90 This driver adds support for rfkill and backlight control to Dell 92 This driver adds support for rfkill and backlight control to Dell
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index a43cfd906c6d..d93e962f2610 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -589,14 +589,14 @@ static const struct backlight_ops dell_ops = {
589 .update_status = dell_send_intensity, 589 .update_status = dell_send_intensity,
590}; 590};
591 591
592static void touchpad_led_on() 592static void touchpad_led_on(void)
593{ 593{
594 int command = 0x97; 594 int command = 0x97;
595 char data = 1; 595 char data = 1;
596 i8042_command(&data, command | 1 << 12); 596 i8042_command(&data, command | 1 << 12);
597} 597}
598 598
599static void touchpad_led_off() 599static void touchpad_led_off(void)
600{ 600{
601 int command = 0x97; 601 int command = 0x97;
602 char data = 2; 602 char data = 2;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 13ef8c37471d..dcdc1f4a4624 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -121,6 +121,7 @@ struct toshiba_acpi_dev {
121 int illumination_supported:1; 121 int illumination_supported:1;
122 int video_supported:1; 122 int video_supported:1;
123 int fan_supported:1; 123 int fan_supported:1;
124 int system_event_supported:1;
124 125
125 struct mutex mutex; 126 struct mutex mutex;
126}; 127};
@@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
724 u32 hci_result; 725 u32 hci_result;
725 u32 value; 726 u32 value;
726 727
727 if (!dev->key_event_valid) { 728 if (!dev->key_event_valid && dev->system_event_supported) {
728 hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result); 729 hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
729 if (hci_result == HCI_SUCCESS) { 730 if (hci_result == HCI_SUCCESS) {
730 dev->key_event_valid = 1; 731 dev->key_event_valid = 1;
@@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
964 965
965 /* enable event fifo */ 966 /* enable event fifo */
966 hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result); 967 hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
968 if (hci_result == HCI_SUCCESS)
969 dev->system_event_supported = 1;
967 970
968 props.type = BACKLIGHT_PLATFORM; 971 props.type = BACKLIGHT_PLATFORM;
969 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; 972 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
1032{ 1035{
1033 struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev); 1036 struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
1034 u32 hci_result, value; 1037 u32 hci_result, value;
1038 int retries = 3;
1035 1039
1036 if (event != 0x80) 1040 if (!dev->system_event_supported || event != 0x80)
1037 return; 1041 return;
1042
1038 do { 1043 do {
1039 hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result); 1044 hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
1040 if (hci_result == HCI_SUCCESS) { 1045 switch (hci_result) {
1046 case HCI_SUCCESS:
1041 if (value == 0x100) 1047 if (value == 0x100)
1042 continue; 1048 continue;
1043 /* act on key press; ignore key release */ 1049 /* act on key press; ignore key release */
@@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
1049 pr_info("Unknown key %x\n", 1055 pr_info("Unknown key %x\n",
1050 value); 1056 value);
1051 } 1057 }
1052 } else if (hci_result == HCI_NOT_SUPPORTED) { 1058 break;
1059 case HCI_NOT_SUPPORTED:
1053 /* This is a workaround for an unresolved issue on 1060 /* This is a workaround for an unresolved issue on
1054 * some machines where system events sporadically 1061 * some machines where system events sporadically
1055 * become disabled. */ 1062 * become disabled. */
1056 hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result); 1063 hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
1057 pr_notice("Re-enabled hotkeys\n"); 1064 pr_notice("Re-enabled hotkeys\n");
1065 /* fall through */
1066 default:
1067 retries--;
1068 break;
1058 } 1069 }
1059 } while (hci_result != HCI_EMPTY); 1070 } while (retries && hci_result != HCI_EMPTY);
1060} 1071}
1061 1072
1062 1073
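
The toshiba_acpi notify handler above now bounds its event-drain loop with a retry counter, so firmware that keeps returning errors can no longer keep it spinning. A stand-alone sketch of that bounded-drain pattern; the status codes and the fake event source below are stand-ins for the HCI interface.

/* Sketch: drain an event queue until it reports empty, but give up after a
 * fixed number of non-success reads. The event source is simulated. */
#include <stdio.h>

enum { HCI_SUCCESS, HCI_EMPTY, HCI_NOT_SUPPORTED };

/* Simulated firmware: two key events, then an error, then "empty" forever. */
static int read_event(unsigned int *value)
{
	static const int script[] = { HCI_SUCCESS, HCI_SUCCESS,
				      HCI_NOT_SUPPORTED, HCI_EMPTY };
	static unsigned int pos, key = 0x13f;
	int res = script[pos < 3 ? pos : 3];

	if (pos < 3)
		pos++;
	*value = key++;
	return res;
}

int main(void)
{
	int retries = 3;
	int result;
	unsigned int value;

	do {
		result = read_event(&value);
		switch (result) {
		case HCI_SUCCESS:
			printf("key event 0x%x\n", value);
			break;
		case HCI_NOT_SUPPORTED:
			printf("re-enabling events\n");
			/* fall through: count it against the retry budget */
		default:
			retries--;
			break;
		}
	} while (retries && result != HCI_EMPTY);

	return 0;
}
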
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index cffcb7c00b00..01fa671ec97f 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
61#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5) 61#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5)
62#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6) 62#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6)
63#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7) 63#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7)
64#define PMIC_BATT_CHR_EXCPT_MASK 0xC6 64#define PMIC_BATT_CHR_EXCPT_MASK 0x86
65
65#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31) 66#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31)
66#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF 67#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF
67 68
@@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
304 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; 305 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
305 pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT); 306 pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
306 batt_exception = 1; 307 batt_exception = 1;
307 } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
308 pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
309 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
310 pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
311 batt_exception = 1;
312 } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) { 308 } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
313 pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT; 309 pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
314 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; 310 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
316 batt_exception = 1; 312 batt_exception = 1;
317 } else { 313 } else {
318 pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD; 314 pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
315 if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
316 /* PMIC will change charging current automatically */
317 pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
318 }
319 } 319 }
320 } 320 }
321 321
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index d9fb729535a1..fb7300837fee 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -952,7 +952,7 @@ static int ps3_vuart_bus_interrupt_get(void)
952 } 952 }
953 953
954 result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler, 954 result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler,
955 IRQF_DISABLED, "vuart", &vuart_bus_priv); 955 0, "vuart", &vuart_bus_priv);
956 956
957 if (result) { 957 if (result) {
958 pr_debug("%s:%d: request_irq failed (%d)\n", 958 pr_debug("%s:%d: request_irq failed (%d)\n",
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
index cc328dec946b..8c3f5adf1bc6 100644
--- a/drivers/ps3/ps3stor_lib.c
+++ b/drivers/ps3/ps3stor_lib.c
@@ -167,7 +167,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
167 goto fail_close_device; 167 goto fail_close_device;
168 } 168 }
169 169
170 error = request_irq(dev->irq, handler, IRQF_DISABLED, 170 error = request_irq(dev->irq, handler, 0,
171 dev->sbd.core.driver->name, dev); 171 dev->sbd.core.driver->name, dev);
172 if (error) { 172 if (error) {
173 dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n", 173 dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n",
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index cf3f9997546d..10451a15e828 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
101 101
102static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp) 102static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
103{ 103{
104 return 1; /* always round timer functions to one nanosecond */ 104 tp->tv_sec = 0;
105 tp->tv_nsec = 1;
106 return 0;
105} 107}
106 108
107static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp) 109static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
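The ptp_clock_getres change makes the posix_clock resolution callback behave the way clock_getres(2) expects: write the one-nanosecond resolution into the caller's timespec and return 0 rather than a bare 1. From user space the result is consumed roughly like this (CLOCK_MONOTONIC stands in for a dynamic PTP clock id, which would normally be derived from an open /dev/ptpN descriptor):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec res;

	if (clock_getres(CLOCK_MONOTONIC, &res) != 0) {
		perror("clock_getres");
		return 1;
	}
	/* For a PTP clock patched as above this would report 0.000000001 s. */
	printf("resolution: %ld.%09ld s\n", (long)res.tv_sec, res.tv_nsec);
	return 0;
}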
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 5225930a10cd..691b1ab1a3d0 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
851 INIT_WORK(&priv->idb_work, tsi721_db_dpc); 851 INIT_WORK(&priv->idb_work, tsi721_db_dpc);
852 852
853 /* Allocate buffer for inbound doorbells queue */ 853 /* Allocate buffer for inbound doorbells queue */
854 priv->idb_base = dma_alloc_coherent(&priv->pdev->dev, 854 priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
855 IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, 855 IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
856 &priv->idb_dma, GFP_KERNEL); 856 &priv->idb_dma, GFP_KERNEL);
857 if (!priv->idb_base) 857 if (!priv->idb_base)
858 return -ENOMEM; 858 return -ENOMEM;
859 859
860 memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
861
862 dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n", 860 dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
863 priv->idb_base, (unsigned long long)priv->idb_dma); 861 priv->idb_base, (unsigned long long)priv->idb_dma);
864 862
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
904 */ 902 */
905 903
906 /* Allocate space for DMA descriptors */ 904 /* Allocate space for DMA descriptors */
907 bd_ptr = dma_alloc_coherent(&priv->pdev->dev, 905 bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
908 bd_num * sizeof(struct tsi721_dma_desc), 906 bd_num * sizeof(struct tsi721_dma_desc),
909 &bd_phys, GFP_KERNEL); 907 &bd_phys, GFP_KERNEL);
910 if (!bd_ptr) 908 if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
913 priv->bdma[chnum].bd_phys = bd_phys; 911 priv->bdma[chnum].bd_phys = bd_phys;
914 priv->bdma[chnum].bd_base = bd_ptr; 912 priv->bdma[chnum].bd_base = bd_ptr;
915 913
916 memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
917
918 dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n", 914 dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
919 bd_ptr, (unsigned long long)bd_phys); 915 bd_ptr, (unsigned long long)bd_phys);
920 916
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
922 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? 918 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
923 bd_num : TSI721_DMA_MINSTSSZ; 919 bd_num : TSI721_DMA_MINSTSSZ;
924 sts_size = roundup_pow_of_two(sts_size); 920 sts_size = roundup_pow_of_two(sts_size);
925 sts_ptr = dma_alloc_coherent(&priv->pdev->dev, 921 sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
926 sts_size * sizeof(struct tsi721_dma_sts), 922 sts_size * sizeof(struct tsi721_dma_sts),
927 &sts_phys, GFP_KERNEL); 923 &sts_phys, GFP_KERNEL);
928 if (!sts_ptr) { 924 if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
938 priv->bdma[chnum].sts_base = sts_ptr; 934 priv->bdma[chnum].sts_base = sts_ptr;
939 priv->bdma[chnum].sts_size = sts_size; 935 priv->bdma[chnum].sts_size = sts_size;
940 936
941 memset(sts_ptr, 0, sts_size);
942
943 dev_dbg(&priv->pdev->dev, 937 dev_dbg(&priv->pdev->dev,
944 "desc status FIFO @ %p (phys = %llx) size=0x%x\n", 938 "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
945 sts_ptr, (unsigned long long)sts_phys, sts_size); 939 sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
1400 1394
1401 /* Outbound message descriptor status FIFO allocation */ 1395 /* Outbound message descriptor status FIFO allocation */
1402 priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); 1396 priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
1403 priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev, 1397 priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
1404 priv->omsg_ring[mbox].sts_size * 1398 priv->omsg_ring[mbox].sts_size *
1405 sizeof(struct tsi721_dma_sts), 1399 sizeof(struct tsi721_dma_sts),
1406 &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); 1400 &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
1412 goto out_desc; 1406 goto out_desc;
1413 } 1407 }
1414 1408
1415 memset(priv->omsg_ring[mbox].sts_base, 0,
1416 entries * sizeof(struct tsi721_dma_sts));
1417
1418 /* 1409 /*
1419 * Configure Outbound Messaging Engine 1410 * Configure Outbound Messaging Engine
1420 */ 1411 */
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
2116 INIT_LIST_HEAD(&mport->dbells); 2107 INIT_LIST_HEAD(&mport->dbells);
2117 2108
2118 rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); 2109 rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
2119 rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0); 2110 rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
2120 rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); 2111 rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
2121 strcpy(mport->name, "Tsi721 mport"); 2112 strcpy(mport->name, "Tsi721 mport");
2122 2113
2123 /* Hook up interrupt handler */ 2114 /* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2163 const struct pci_device_id *id) 2154 const struct pci_device_id *id)
2164{ 2155{
2165 struct tsi721_device *priv; 2156 struct tsi721_device *priv;
2166 int i; 2157 int i, cap;
2167 int err; 2158 int err;
2168 u32 regval; 2159 u32 regval;
2169 2160
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2271 dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); 2262 dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
2272 } 2263 }
2273 2264
2274 /* Clear "no snoop" and "relaxed ordering" bits. */ 2265 cap = pci_pcie_cap(pdev);
2275 pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval); 2266 BUG_ON(cap == 0);
2276 regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN); 2267
2277 pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval); 2268 /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
2269 pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
2270 regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
2271 PCI_EXP_DEVCTL_NOSNOOP_EN);
2272 regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
2273 pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
2274
2275 /* Adjust PCIe completion timeout. */
2276 pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
2277 regval &= ~(0x0f);
2278 pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
2278 2279
2279 /* 2280 /*
2280 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block 2281 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
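Two patterns recur in the tsi721.c hunk: dma_alloc_coherent() followed by a hand-rolled memset() collapses into dma_zalloc_coherent(), and the PCI Express control registers are addressed through pci_pcie_cap() instead of a hard-coded 0x40 offset. The conversion is also a quiet bug fix: one of the deleted calls cleared only sts_size bytes rather than sts_size * sizeof(struct tsi721_dma_sts), while dma_zalloc_coherent() zeroes the whole allocation. A hedged, non-compilable fragment of the equivalence (dev, size and dma_handle are placeholders for the driver's own variables):

/* Before: allocate, then clear by hand (and hope the length matches). */
buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
if (!buf)
	return -ENOMEM;
memset(buf, 0, size);

/* After: one call that hands back already-zeroed coherent memory. */
buf = dma_zalloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
if (!buf)
	return -ENOMEM;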
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 58be4deb1402..822e54c394d5 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -72,6 +72,8 @@
72#define TSI721_MSIXPBA_OFFSET 0x2a000 72#define TSI721_MSIXPBA_OFFSET 0x2a000
73#define TSI721_PCIECFG_EPCTL 0x400 73#define TSI721_PCIECFG_EPCTL 0x400
74 74
75#define MAX_READ_REQUEST_SZ_SHIFT 12
76
75/* 77/*
76 * Event Management Registers 78 * Event Management Registers
77 */ 79 */
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 5abeb3ac3e8d..298c6c6a2795 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -160,7 +160,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id)
160 break; 160 break;
161 } 161 }
162 162
163 if (!ri) 163 if (i == ARRAY_SIZE(aat2870_regulators))
164 return NULL; 164 return NULL;
165 165
166 ri->enable_addr = AAT2870_LDO_EN; 166 ri->enable_addr = AAT2870_LDO_EN;
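The aat2870 change is the classic search-loop fix: the cursor pointer is assigned on every iteration, so it is never NULL after the loop and the old !ri test could not detect a miss; the loop index has to be compared against ARRAY_SIZE() instead. A small stand-alone illustration of the idiom (struct reg, regs[] and find_reg() are invented for the example):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct reg { int id; const char *name; };

static struct reg regs[] = {
	{ 1, "LDOA" },
	{ 2, "LDOB" },
};

static struct reg *find_reg(int id)
{
	struct reg *r = NULL;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		r = &regs[i];
		if (r->id == id)
			break;
	}

	/* 'r' points at the last entry even on a miss, so test the index,
	 * not the pointer. */
	if (i == ARRAY_SIZE(regs))
		return NULL;
	return r;
}

int main(void)
{
	printf("%s\n", find_reg(3) ? "found" : "not found");
	return 0;
}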
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 669d02160221..938398f3e869 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2799,8 +2799,8 @@ void regulator_unregister(struct regulator_dev *rdev)
2799 list_del(&rdev->list); 2799 list_del(&rdev->list);
2800 if (rdev->supply) 2800 if (rdev->supply)
2801 regulator_put(rdev->supply); 2801 regulator_put(rdev->supply);
2802 device_unregister(&rdev->dev);
2803 kfree(rdev->constraints); 2802 kfree(rdev->constraints);
2803 device_unregister(&rdev->dev);
2804 mutex_unlock(&regulator_list_mutex); 2804 mutex_unlock(&regulator_list_mutex);
2805} 2805}
2806EXPORT_SYMBOL_GPL(regulator_unregister); 2806EXPORT_SYMBOL_GPL(regulator_unregister);
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 66d2d60b436a..b552aae55b41 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -664,10 +664,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
664 664
665 switch (id) { 665 switch (id) {
666 case TPS65910_REG_VDD1: 666 case TPS65910_REG_VDD1:
667 dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1; 667 dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
668 if (dcdc_mult == 1) 668 if (dcdc_mult == 1)
669 dcdc_mult--; 669 dcdc_mult--;
670 vsel = (selector % VDD1_2_NUM_VOLTS) + 3; 670 vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
671 671
672 tps65910_modify_bits(pmic, TPS65910_VDD1, 672 tps65910_modify_bits(pmic, TPS65910_VDD1,
673 (dcdc_mult << VDD1_VGAIN_SEL_SHIFT), 673 (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
@@ -675,10 +675,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
675 tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel); 675 tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
676 break; 676 break;
677 case TPS65910_REG_VDD2: 677 case TPS65910_REG_VDD2:
678 dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1; 678 dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
679 if (dcdc_mult == 1) 679 if (dcdc_mult == 1)
680 dcdc_mult--; 680 dcdc_mult--;
681 vsel = (selector % VDD1_2_NUM_VOLTS) + 3; 681 vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
682 682
683 tps65910_modify_bits(pmic, TPS65910_VDD2, 683 tps65910_modify_bits(pmic, TPS65910_VDD2,
684 (dcdc_mult << VDD2_VGAIN_SEL_SHIFT), 684 (dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
@@ -756,9 +756,9 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
756 switch (id) { 756 switch (id) {
757 case TPS65910_REG_VDD1: 757 case TPS65910_REG_VDD1:
758 case TPS65910_REG_VDD2: 758 case TPS65910_REG_VDD2:
759 mult = (selector / VDD1_2_NUM_VOLTS) + 1; 759 mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
760 volt = VDD1_2_MIN_VOLT + 760 volt = VDD1_2_MIN_VOLT +
761 (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET; 761 (selector % VDD1_2_NUM_VOLT_FINE) * VDD1_2_OFFSET;
762 break; 762 break;
763 case TPS65911_REG_VDDCTRL: 763 case TPS65911_REG_VDDCTRL:
764 volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET); 764 volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
@@ -947,6 +947,8 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
947 947
948 if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) { 948 if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
949 pmic->desc[i].ops = &tps65910_ops_dcdc; 949 pmic->desc[i].ops = &tps65910_ops_dcdc;
950 pmic->desc[i].n_voltages = VDD1_2_NUM_VOLT_FINE *
951 VDD1_2_NUM_VOLT_COARSE;
950 } else if (i == TPS65910_REG_VDD3) { 952 } else if (i == TPS65910_REG_VDD3) {
951 if (tps65910_chip_id(tps65910) == TPS65910) 953 if (tps65910_chip_id(tps65910) == TPS65910)
952 pmic->desc[i].ops = &tps65910_ops_vdd3; 954 pmic->desc[i].ops = &tps65910_ops_vdd3;
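The tps65910 hunk switches the VDD1/VDD2 selector math from VDD1_2_NUM_VOLTS to VDD1_2_NUM_VOLT_FINE and, correspondingly, registers n_voltages as the fine count times the coarse (gain) count, so every selector the regulator core may hand in decomposes into a gain multiplier and a fine step. A toy decomposition with made-up constants (the real values live in the tps65910 headers):

#include <stdio.h>

/* Illustrative values only, not the hardware's actual tables. */
#define NUM_VOLT_FINE	73
#define NUM_VOLT_COARSE	3

int main(void)
{
	int n_voltages = NUM_VOLT_FINE * NUM_VOLT_COARSE;
	int selector;

	for (selector = 0; selector < n_voltages; selector += 70) {
		int mult = selector / NUM_VOLT_FINE + 1;	/* gain select */
		int fine = selector % NUM_VOLT_FINE;		/* step within a gain range */

		printf("selector %3d -> gain mult %d, fine step %2d\n",
		       selector, mult, fine);
	}
	return 0;
}

Without the larger n_voltages, the core would presumably never offer selectors beyond the first fine range, which appears to be what the probe() change addresses.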
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index ee8747f4fa08..11cc308d66e9 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -71,6 +71,7 @@ struct twlreg_info {
71#define VREG_TYPE 1 71#define VREG_TYPE 1
72#define VREG_REMAP 2 72#define VREG_REMAP 2
73#define VREG_DEDICATED 3 /* LDO control */ 73#define VREG_DEDICATED 3 /* LDO control */
74#define VREG_VOLTAGE_SMPS_4030 9
74/* TWL6030 register offsets */ 75/* TWL6030 register offsets */
75#define VREG_TRANS 1 76#define VREG_TRANS 1
76#define VREG_STATE 2 77#define VREG_STATE 2
@@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = {
514 .get_status = twl4030reg_get_status, 515 .get_status = twl4030reg_get_status,
515}; 516};
516 517
518static int
519twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
520 unsigned *selector)
521{
522 struct twlreg_info *info = rdev_get_drvdata(rdev);
523 int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
524
525 twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
526 vsel);
527 return 0;
528}
529
530static int twl4030smps_get_voltage(struct regulator_dev *rdev)
531{
532 struct twlreg_info *info = rdev_get_drvdata(rdev);
533 int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
534 VREG_VOLTAGE_SMPS_4030);
535
536 return vsel * 12500 + 600000;
537}
538
539static struct regulator_ops twl4030smps_ops = {
540 .set_voltage = twl4030smps_set_voltage,
541 .get_voltage = twl4030smps_get_voltage,
542};
543
517static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) 544static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
518{ 545{
519 struct twlreg_info *info = rdev_get_drvdata(rdev); 546 struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = {
856 }, \ 883 }, \
857 } 884 }
858 885
886#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
887 { \
888 .base = offset, \
889 .id = num, \
890 .delay = turnon_delay, \
891 .remap = remap_conf, \
892 .desc = { \
893 .name = #label, \
894 .id = TWL4030_REG_##label, \
895 .ops = &twl4030smps_ops, \
896 .type = REGULATOR_VOLTAGE, \
897 .owner = THIS_MODULE, \
898 }, \
899 }
900
859#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \ 901#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
860 .base = offset, \ 902 .base = offset, \
861 .min_mV = min_mVolts, \ 903 .min_mV = min_mVolts, \
@@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = {
947 TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08), 989 TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
948 TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08), 990 TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
949 TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08), 991 TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
950 TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08), 992 TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08),
951 TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08), 993 TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08),
952 TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08), 994 TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
953 TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08), 995 TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
954 TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08), 996 TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
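The new twl4030smps_set_voltage()/get_voltage() pair in the twl-regulator hunk encodes VDD1/VDD2 on a linear scale: a 600 mV base plus 12.5 mV per step, rounded up so the programmed voltage never undershoots the requested minimum. A quick worked example using the same constants as the hunk:

#include <stdio.h>

#define SMPS_BASE_UV	600000
#define SMPS_STEP_UV	12500

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int min_uV = 1200000;	/* ask for at least 1.2 V */
	int vsel = DIV_ROUND_UP(min_uV - SMPS_BASE_UV, SMPS_STEP_UV);

	printf("vsel = %d, programmed voltage = %d uV\n",
	       vsel, SMPS_BASE_UV + vsel * SMPS_STEP_UV);
	/* vsel = 48, programmed voltage = 1200000 uV */
	return 0;
}

A request that does not land exactly on a 12.5 mV step rounds up to the next selector.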
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e8326f26fa2f..dc4c2748bbc3 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
63 */ 63 */
64 delta = timespec_sub(old_system, old_rtc); 64 delta = timespec_sub(old_system, old_rtc);
65 delta_delta = timespec_sub(delta, old_delta); 65 delta_delta = timespec_sub(delta, old_delta);
66 if (abs(delta_delta.tv_sec) >= 2) { 66 if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
67 /* 67 /*
68 * if delta_delta is too large, assume time correction 68 * if delta_delta is too large, assume time correction
69 * has occured and set old_delta to the current delta. 69 * has occured and set old_delta to the current delta.
69 * has occurred and set old_delta to the current delta. 69 * has occurred and set old_delta to the current delta.
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev)
97 rtc_tm_to_time(&tm, &new_rtc.tv_sec); 97 rtc_tm_to_time(&tm, &new_rtc.tv_sec);
98 new_rtc.tv_nsec = 0; 98 new_rtc.tv_nsec = 0;
99 99
100 if (new_rtc.tv_sec <= old_rtc.tv_sec) { 100 if (new_rtc.tv_sec < old_rtc.tv_sec) {
101 if (new_rtc.tv_sec < old_rtc.tv_sec) 101 pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
102 pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
103 return 0; 102 return 0;
104 } 103 }
105 104
@@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev)
116 sleep_time = timespec_sub(sleep_time, 115 sleep_time = timespec_sub(sleep_time,
117 timespec_sub(new_system, old_system)); 116 timespec_sub(new_system, old_system));
118 117
119 timekeeping_inject_sleeptime(&sleep_time); 118 if (sleep_time.tv_sec >= 0)
119 timekeeping_inject_sleeptime(&sleep_time);
120 return 0; 120 return 0;
121} 121}
122 122
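The rtc_suspend() hunk replaces abs(delta_delta.tv_sec) >= 2 with an explicit two-sided range check. The likely motivation is timespec normalization: timespec_sub() keeps tv_nsec in [0, 1e9), so a drift of -1.5 s is stored as tv_sec = -2 with tv_nsec = 5e8, and the abs() form tripped a whole second earlier on the negative side than on the positive one. A small demonstration of that asymmetry (the normalization helper mimics the kernel's behaviour):

#include <stdio.h>
#include <time.h>

/* Mimic a normalized timespec: tv_nsec always in [0, 1e9). */
static struct timespec make_ts(double seconds)
{
	long long ns = (long long)(seconds * 1e9);
	struct timespec ts;

	ts.tv_sec = (time_t)(ns / 1000000000LL);
	ts.tv_nsec = (long)(ns % 1000000000LL);
	if (ts.tv_nsec < 0) {
		ts.tv_nsec += 1000000000L;
		ts.tv_sec -= 1;
	}
	return ts;
}

int main(void)
{
	const double drift[] = { -2.5, -1.5, 1.5, 2.5 };
	int i;

	for (i = 0; i < 4; i++) {
		struct timespec ts = make_ts(drift[i]);
		int old_check = ts.tv_sec <= -2 || ts.tv_sec >= 2;	/* abs() form */
		int new_check = ts.tv_sec < -2 || ts.tv_sec >= 2;

		printf("%+.1f s -> tv_sec=%lld old=%d new=%d\n",
		       drift[i], (long long)ts.tv_sec, old_check, new_check);
	}
	return 0;
}

Only the -1.5 s case differs between the two checks, which is exactly the window the rewrite stops flagging.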
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8e286259a007..fa4d9f324189 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -319,6 +319,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
319} 319}
320EXPORT_SYMBOL_GPL(rtc_read_alarm); 320EXPORT_SYMBOL_GPL(rtc_read_alarm);
321 321
322static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
323{
324 int err;
325
326 if (!rtc->ops)
327 err = -ENODEV;
328 else if (!rtc->ops->set_alarm)
329 err = -EINVAL;
330 else
331 err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
332
333 return err;
334}
335
322static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 336static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
323{ 337{
324 struct rtc_time tm; 338 struct rtc_time tm;
@@ -342,14 +356,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
342 * over right here, before we set the alarm. 356 * over right here, before we set the alarm.
343 */ 357 */
344 358
345 if (!rtc->ops) 359 return ___rtc_set_alarm(rtc, alarm);
346 err = -ENODEV;
347 else if (!rtc->ops->set_alarm)
348 err = -EINVAL;
349 else
350 err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
351
352 return err;
353} 360}
354 361
355int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 362int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -763,6 +770,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
763 return 0; 770 return 0;
764} 771}
765 772
773static void rtc_alarm_disable(struct rtc_device *rtc)
774{
775 struct rtc_wkalrm alarm;
776 struct rtc_time tm;
777
778 __rtc_read_time(rtc, &tm);
779
780 alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
781 ktime_set(300, 0)));
782 alarm.enabled = 0;
783
784 ___rtc_set_alarm(rtc, &alarm);
785}
786
766/** 787/**
767 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue 788 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
768 * @rtc rtc device 789 * @rtc rtc device
@@ -784,8 +805,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
784 struct rtc_wkalrm alarm; 805 struct rtc_wkalrm alarm;
785 int err; 806 int err;
786 next = timerqueue_getnext(&rtc->timerqueue); 807 next = timerqueue_getnext(&rtc->timerqueue);
787 if (!next) 808 if (!next) {
809 rtc_alarm_disable(rtc);
788 return; 810 return;
811 }
789 alarm.time = rtc_ktime_to_tm(next->expires); 812 alarm.time = rtc_ktime_to_tm(next->expires);
790 alarm.enabled = 1; 813 alarm.enabled = 1;
791 err = __rtc_set_alarm(rtc, &alarm); 814 err = __rtc_set_alarm(rtc, &alarm);
@@ -847,7 +870,8 @@ again:
847 err = __rtc_set_alarm(rtc, &alarm); 870 err = __rtc_set_alarm(rtc, &alarm);
848 if (err == -ETIME) 871 if (err == -ETIME)
849 goto again; 872 goto again;
850 } 873 } else
874 rtc_alarm_disable(rtc);
851 875
852 mutex_unlock(&rtc->ops_lock); 876 mutex_unlock(&rtc->ops_lock);
853} 877}
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index d33544802a2e..bb21f443fb70 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -76,12 +76,15 @@ static inline unsigned char vrtc_is_updating(void)
76/* 76/*
77 * rtc_time's year contains the increment over 1900, but vRTC's YEAR 77 * rtc_time's year contains the increment over 1900, but vRTC's YEAR
78 * register can't be programmed to value larger than 0x64, so vRTC 78 * register can't be programmed to value larger than 0x64, so vRTC
79 * driver chose to use 1960 (1970 is UNIX time start point) as the base, 79 * driver chose to use 1972 (1970 is UNIX time start point) as the base,
80 * and does the translation at read/write time. 80 * and does the translation at read/write time.
81 * 81 *
82 * Why not just use 1970 as the offset? it's because using 1960 will 82 * Why not just use 1970 as the offset? it's because using 1972 will
83 * make it consistent in leap year setting for both vrtc and low-level 83 * make it consistent in leap year setting for both vrtc and low-level
84 * physical rtc devices. 84 * physical rtc devices. Then why not use 1960 as the offset? If we use
85 * 1960, for a device's first use, its YEAR register is 0 and the system
86 * year will be parsed as 1960 which is not a valid UNIX time and will
87 * cause many applications to fail mysteriously.
85 */ 88 */
86static int mrst_read_time(struct device *dev, struct rtc_time *time) 89static int mrst_read_time(struct device *dev, struct rtc_time *time)
87{ 90{
@@ -99,10 +102,10 @@ static int mrst_read_time(struct device *dev, struct rtc_time *time)
99 time->tm_year = vrtc_cmos_read(RTC_YEAR); 102 time->tm_year = vrtc_cmos_read(RTC_YEAR);
100 spin_unlock_irqrestore(&rtc_lock, flags); 103 spin_unlock_irqrestore(&rtc_lock, flags);
101 104
102 /* Adjust for the 1960/1900 */ 105 /* Adjust for the 1972/1900 */
103 time->tm_year += 60; 106 time->tm_year += 72;
104 time->tm_mon--; 107 time->tm_mon--;
105 return RTC_24H; 108 return rtc_valid_tm(time);
106} 109}
107 110
108static int mrst_set_time(struct device *dev, struct rtc_time *time) 111static int mrst_set_time(struct device *dev, struct rtc_time *time)
@@ -119,9 +122,9 @@ static int mrst_set_time(struct device *dev, struct rtc_time *time)
119 min = time->tm_min; 122 min = time->tm_min;
120 sec = time->tm_sec; 123 sec = time->tm_sec;
121 124
122 if (yrs < 70 || yrs > 138) 125 if (yrs < 72 || yrs > 138)
123 return -EINVAL; 126 return -EINVAL;
124 yrs -= 60; 127 yrs -= 72;
125 128
126 spin_lock_irqsave(&rtc_lock, flags); 129 spin_lock_irqsave(&rtc_lock, flags);
127 130
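The rtc-mrst comment spells out why the vRTC base year moves from 1960 to 1972: both are leap years, so the leap-year phase stays aligned with the physical RTC, but with 1960 a factory-fresh YEAR register of 0 decodes to a pre-epoch date. With the new base the accepted range still fits inside the hardware's 0x64 limit, as a quick check shows:

#include <stdio.h>

#define VRTC_YEAR_BASE	72	/* rtc_time years count from 1900 */

int main(void)
{
	int year_reg = 0;	/* a brand-new device */
	int tm_year;

	printf("register %d decodes to %d\n",
	       year_reg, 1900 + year_reg + VRTC_YEAR_BASE);

	/* Accepted writes: tm_year 72..138, i.e. 1972..2038. */
	for (tm_year = 72; tm_year <= 138; tm_year += 33)
		printf("tm_year %3d (year %d) -> register 0x%02x\n",
		       tm_year, 1900 + tm_year, tm_year - VRTC_YEAR_BASE);
	return 0;
}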
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index b3eba3cddd42..e4b6880aabd0 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -220,7 +220,7 @@ static void puv3_rtc_enable(struct platform_device *pdev, int en)
220 } 220 }
221} 221}
222 222
223static int puv3_rtc_remove(struct platform_device *dev) 223static int __devexit puv3_rtc_remove(struct platform_device *dev)
224{ 224{
225 struct rtc_device *rtc = platform_get_drvdata(dev); 225 struct rtc_device *rtc = platform_get_drvdata(dev);
226 226
@@ -236,7 +236,7 @@ static int puv3_rtc_remove(struct platform_device *dev)
236 return 0; 236 return 0;
237} 237}
238 238
239static int puv3_rtc_probe(struct platform_device *pdev) 239static int __devinit puv3_rtc_probe(struct platform_device *pdev)
240{ 240{
241 struct rtc_device *rtc; 241 struct rtc_device *rtc;
242 struct resource *res; 242 struct resource *res;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7639ab906f02..5b979d9cc332 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
202 void __iomem *base = s3c_rtc_base; 202 void __iomem *base = s3c_rtc_base;
203 int year = tm->tm_year - 100; 203 int year = tm->tm_year - 100;
204 204
205 clk_enable(rtc_clk);
206 pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n", 205 pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
207 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, 206 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
208 tm->tm_hour, tm->tm_min, tm->tm_sec); 207 tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
214 return -EINVAL; 213 return -EINVAL;
215 } 214 }
216 215
216 clk_enable(rtc_clk);
217 writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC); 217 writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC);
218 writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN); 218 writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN);
219 writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR); 219 writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 43068fbd0baa..1b6d9247fdc7 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -641,6 +641,8 @@ static int __init zcore_init(void)
641 641
642 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 642 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
643 return -ENODATA; 643 return -ENODATA;
644 if (OLDMEM_BASE)
645 return -ENODATA;
644 646
645 zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long)); 647 zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
646 debug_register_view(zcore_dbf, &debug_sprintf_view); 648 debug_register_view(zcore_dbf, &debug_sprintf_view);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 75c3f1f8fd43..a84631a7391d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
529int chsc_chp_vary(struct chp_id chpid, int on) 529int chsc_chp_vary(struct chp_id chpid, int on)
530{ 530{
531 struct channel_path *chp = chpid_to_chp(chpid); 531 struct channel_path *chp = chpid_to_chp(chpid);
532 struct chp_link link;
533 532
534 memset(&link, 0, sizeof(struct chp_link));
535 link.chpid = chpid;
536 /* Wait until previous actions have settled. */ 533 /* Wait until previous actions have settled. */
537 css_wait_for_slow_path(); 534 css_wait_for_slow_path();
538 /* 535 /*
@@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
542 /* Try to update the channel path descritor. */ 539 /* Try to update the channel path descritor. */
543 chsc_determine_base_channel_path_desc(chpid, &chp->desc); 540 chsc_determine_base_channel_path_desc(chpid, &chp->desc);
544 for_each_subchannel_staged(s390_subchannel_vary_chpid_on, 541 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
545 __s390_vary_chpid_on, &link); 542 __s390_vary_chpid_on, &chpid);
546 } else 543 } else
547 for_each_subchannel_staged(s390_subchannel_vary_chpid_off, 544 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
548 NULL, &link); 545 NULL, &chpid);
549 546
550 return 0; 547 return 0;
551} 548}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 155a82bcb9e5..4a1ff5c2eb88 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,8 +68,13 @@ struct schib {
68 __u8 mda[4]; /* model dependent area */ 68 __u8 mda[4]; /* model dependent area */
69} __attribute__ ((packed,aligned(4))); 69} __attribute__ ((packed,aligned(4)));
70 70
71/*
72 * When rescheduled, todo's with higher values will overwrite those
73 * with lower values.
74 */
71enum sch_todo { 75enum sch_todo {
72 SCH_TODO_NOTHING, 76 SCH_TODO_NOTHING,
77 SCH_TODO_EVAL,
73 SCH_TODO_UNREG, 78 SCH_TODO_UNREG,
74}; 79};
75 80
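The new comment in cio.h, taken together with css_sched_sch_todo() later in this series (it bails out when sch->todo >= todo), turns the enum order into a priority scheme: a pending SCH_TODO_UNREG absorbs a later SCH_TODO_EVAL, never the other way around. Reduced to its essentials the merge rule looks like this (names shortened for the sketch):

#include <stdio.h>

enum todo { TODO_NOTHING, TODO_EVAL, TODO_UNREG };

static enum todo pending = TODO_NOTHING;

/* Keep only the highest-priority request, as css_sched_sch_todo() does. */
static void schedule_todo(enum todo t)
{
	if (pending >= t)
		return;
	pending = t;
}

int main(void)
{
	schedule_todo(TODO_UNREG);
	schedule_todo(TODO_EVAL);	/* ignored: UNREG already pending */
	printf("pending = %d\n", pending);
	return 0;
}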
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92d7324acb1c..21908e67bf67 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
195} 195}
196EXPORT_SYMBOL_GPL(css_sch_device_unregister); 196EXPORT_SYMBOL_GPL(css_sch_device_unregister);
197 197
198static void css_sch_todo(struct work_struct *work)
199{
200 struct subchannel *sch;
201 enum sch_todo todo;
202
203 sch = container_of(work, struct subchannel, todo_work);
204 /* Find out todo. */
205 spin_lock_irq(sch->lock);
206 todo = sch->todo;
207 CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
208 sch->schid.sch_no, todo);
209 sch->todo = SCH_TODO_NOTHING;
210 spin_unlock_irq(sch->lock);
211 /* Perform todo. */
212 if (todo == SCH_TODO_UNREG)
213 css_sch_device_unregister(sch);
214 /* Release workqueue ref. */
215 put_device(&sch->dev);
216}
217
218/**
219 * css_sched_sch_todo - schedule a subchannel operation
220 * @sch: subchannel
221 * @todo: todo
222 *
223 * Schedule the operation identified by @todo to be performed on the slow path
224 * workqueue. Do nothing if another operation with higher priority is already
225 * scheduled. Needs to be called with subchannel lock held.
226 */
227void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
228{
229 CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
230 sch->schid.ssid, sch->schid.sch_no, todo);
231 if (sch->todo >= todo)
232 return;
233 /* Get workqueue ref. */
234 if (!get_device(&sch->dev))
235 return;
236 sch->todo = todo;
237 if (!queue_work(cio_work_q, &sch->todo_work)) {
238 /* Already queued, release workqueue ref. */
239 put_device(&sch->dev);
240 }
241}
242
243static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) 198static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
244{ 199{
245 int i; 200 int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
466 css_schedule_eval(schid); 421 css_schedule_eval(schid);
467} 422}
468 423
424/**
425 * css_sched_sch_todo - schedule a subchannel operation
426 * @sch: subchannel
427 * @todo: todo
428 *
429 * Schedule the operation identified by @todo to be performed on the slow path
430 * workqueue. Do nothing if another operation with higher priority is already
431 * scheduled. Needs to be called with subchannel lock held.
432 */
433void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
434{
435 CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
436 sch->schid.ssid, sch->schid.sch_no, todo);
437 if (sch->todo >= todo)
438 return;
439 /* Get workqueue ref. */
440 if (!get_device(&sch->dev))
441 return;
442 sch->todo = todo;
443 if (!queue_work(cio_work_q, &sch->todo_work)) {
444 /* Already queued, release workqueue ref. */
445 put_device(&sch->dev);
446 }
447}
448
449static void css_sch_todo(struct work_struct *work)
450{
451 struct subchannel *sch;
452 enum sch_todo todo;
453 int ret;
454
455 sch = container_of(work, struct subchannel, todo_work);
456 /* Find out todo. */
457 spin_lock_irq(sch->lock);
458 todo = sch->todo;
459 CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
460 sch->schid.sch_no, todo);
461 sch->todo = SCH_TODO_NOTHING;
462 spin_unlock_irq(sch->lock);
463 /* Perform todo. */
464 switch (todo) {
465 case SCH_TODO_NOTHING:
466 break;
467 case SCH_TODO_EVAL:
468 ret = css_evaluate_known_subchannel(sch, 1);
469 if (ret == -EAGAIN) {
470 spin_lock_irq(sch->lock);
471 css_sched_sch_todo(sch, todo);
472 spin_unlock_irq(sch->lock);
473 }
474 break;
475 case SCH_TODO_UNREG:
476 css_sch_device_unregister(sch);
477 break;
478 }
479 /* Release workqueue ref. */
480 put_device(&sch->dev);
481}
482
469static struct idset *slow_subchannel_set; 483static struct idset *slow_subchannel_set;
470static spinlock_t slow_subchannel_lock; 484static spinlock_t slow_subchannel_lock;
471static wait_queue_head_t css_eval_wq; 485static wait_queue_head_t css_eval_wq;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d734f4a0ecac..47269858ecb6 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
1868 */ 1868 */
1869 cdev->private->flags.resuming = 1; 1869 cdev->private->flags.resuming = 1;
1870 cdev->private->path_new_mask = LPM_ANYPATH; 1870 cdev->private->path_new_mask = LPM_ANYPATH;
1871 css_schedule_eval(sch->schid); 1871 css_sched_sch_todo(sch, SCH_TODO_EVAL);
1872 spin_unlock_irq(sch->lock); 1872 spin_unlock_irq(sch->lock);
1873 css_complete_work(); 1873 css_wait_for_slow_path();
1874 1874
1875 /* cdev may have been moved to a different subchannel. */ 1875 /* cdev may have been moved to a different subchannel. */
1876 sch = to_subchannel(cdev->dev.parent); 1876 sch = to_subchannel(cdev->dev.parent);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 52c233fa2b12..1b853513c891 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev)
496 cdev->private->pgid_reset_mask = 0; 496 cdev->private->pgid_reset_mask = 0;
497} 497}
498 498
499void 499static void create_fake_irb(struct irb *irb, int type)
500ccw_device_verify_done(struct ccw_device *cdev, int err) 500{
501 memset(irb, 0, sizeof(*irb));
502 if (type == FAKE_CMD_IRB) {
503 struct cmd_scsw *scsw = &irb->scsw.cmd;
504 scsw->cc = 1;
505 scsw->fctl = SCSW_FCTL_START_FUNC;
506 scsw->actl = SCSW_ACTL_START_PEND;
507 scsw->stctl = SCSW_STCTL_STATUS_PEND;
508 } else if (type == FAKE_TM_IRB) {
509 struct tm_scsw *scsw = &irb->scsw.tm;
510 scsw->x = 1;
511 scsw->cc = 1;
512 scsw->fctl = SCSW_FCTL_START_FUNC;
513 scsw->actl = SCSW_ACTL_START_PEND;
514 scsw->stctl = SCSW_STCTL_STATUS_PEND;
515 }
516}
517
518void ccw_device_verify_done(struct ccw_device *cdev, int err)
501{ 519{
502 struct subchannel *sch; 520 struct subchannel *sch;
503 521
@@ -520,12 +538,8 @@ callback:
520 ccw_device_done(cdev, DEV_STATE_ONLINE); 538 ccw_device_done(cdev, DEV_STATE_ONLINE);
521 /* Deliver fake irb to device driver, if needed. */ 539 /* Deliver fake irb to device driver, if needed. */
522 if (cdev->private->flags.fake_irb) { 540 if (cdev->private->flags.fake_irb) {
523 memset(&cdev->private->irb, 0, sizeof(struct irb)); 541 create_fake_irb(&cdev->private->irb,
524 cdev->private->irb.scsw.cmd.cc = 1; 542 cdev->private->flags.fake_irb);
525 cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
526 cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
527 cdev->private->irb.scsw.cmd.stctl =
528 SCSW_STCTL_STATUS_PEND;
529 cdev->private->flags.fake_irb = 0; 543 cdev->private->flags.fake_irb = 0;
530 if (cdev->handler) 544 if (cdev->handler)
531 cdev->handler(cdev, cdev->private->intparm, 545 cdev->handler(cdev, cdev->private->intparm,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f98698d5735e..ec7fb6d3b479 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
198 if (cdev->private->state == DEV_STATE_VERIFY) { 198 if (cdev->private->state == DEV_STATE_VERIFY) {
199 /* Remember to fake irb when finished. */ 199 /* Remember to fake irb when finished. */
200 if (!cdev->private->flags.fake_irb) { 200 if (!cdev->private->flags.fake_irb) {
201 cdev->private->flags.fake_irb = 1; 201 cdev->private->flags.fake_irb = FAKE_CMD_IRB;
202 cdev->private->intparm = intparm; 202 cdev->private->intparm = intparm;
203 return 0; 203 return 0;
204 } else 204 } else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
213 ret = cio_set_options (sch, flags); 213 ret = cio_set_options (sch, flags);
214 if (ret) 214 if (ret)
215 return ret; 215 return ret;
216 /* Adjust requested path mask to excluded varied off paths. */ 216 /* Adjust requested path mask to exclude unusable paths. */
217 if (lpm) { 217 if (lpm) {
218 lpm &= sch->opm; 218 lpm &= sch->lpm;
219 if (lpm == 0) 219 if (lpm == 0)
220 return -EACCES; 220 return -EACCES;
221 } 221 }
@@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
605 sch = to_subchannel(cdev->dev.parent); 605 sch = to_subchannel(cdev->dev.parent);
606 if (!sch->schib.pmcw.ena) 606 if (!sch->schib.pmcw.ena)
607 return -EINVAL; 607 return -EINVAL;
608 if (cdev->private->state == DEV_STATE_VERIFY) {
609 /* Remember to fake irb when finished. */
610 if (!cdev->private->flags.fake_irb) {
611 cdev->private->flags.fake_irb = FAKE_TM_IRB;
612 cdev->private->intparm = intparm;
613 return 0;
614 } else
615 /* There's already a fake I/O around. */
616 return -EBUSY;
617 }
608 if (cdev->private->state != DEV_STATE_ONLINE) 618 if (cdev->private->state != DEV_STATE_ONLINE)
609 return -EIO; 619 return -EIO;
610 /* Adjust requested path mask to excluded varied off paths. */ 620 /* Adjust requested path mask to exclude unusable paths. */
611 if (lpm) { 621 if (lpm) {
612 lpm &= sch->opm; 622 lpm &= sch->lpm;
613 if (lpm == 0) 623 if (lpm == 0)
614 return -EACCES; 624 return -EACCES;
615 } 625 }
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 2ebb492a5c17..76253dfcc1be 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -111,6 +111,9 @@ enum cdev_todo {
111 CDEV_TODO_UNREG_EVAL, 111 CDEV_TODO_UNREG_EVAL,
112}; 112};
113 113
114#define FAKE_CMD_IRB 1
115#define FAKE_TM_IRB 2
116
114struct ccw_device_private { 117struct ccw_device_private {
115 struct ccw_device *cdev; 118 struct ccw_device *cdev;
116 struct subchannel *sch; 119 struct subchannel *sch;
@@ -138,7 +141,7 @@ struct ccw_device_private {
138 unsigned int doverify:1; /* delayed path verification */ 141 unsigned int doverify:1; /* delayed path verification */
139 unsigned int donotify:1; /* call notify function */ 142 unsigned int donotify:1; /* call notify function */
140 unsigned int recog_done:1; /* dev. recog. complete */ 143 unsigned int recog_done:1; /* dev. recog. complete */
141 unsigned int fake_irb:1; /* deliver faked irb */ 144 unsigned int fake_irb:2; /* deliver faked irb */
142 unsigned int resuming:1; /* recognition while resume */ 145 unsigned int resuming:1; /* recognition while resume */
143 unsigned int pgroup:1; /* pathgroup is set up */ 146 unsigned int pgroup:1; /* pathgroup is set up */
144 unsigned int mpath:1; /* multipathing is set up */ 147 unsigned int mpath:1; /* multipathing is set up */
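The io_sch.h hunk is what makes the wider fake-IRB use possible: fake_irb now carries a type (FAKE_CMD_IRB = 1 or FAKE_TM_IRB = 2) rather than a yes/no flag, and a 1-bit field simply cannot represent the value 2. A tiny demonstration of the truncation the wider field avoids:

#include <stdio.h>

#define FAKE_CMD_IRB 1
#define FAKE_TM_IRB  2

struct flags_old { unsigned int fake_irb:1; };
struct flags_new { unsigned int fake_irb:2; };

int main(void)
{
	struct flags_old o;
	struct flags_new n;

	o.fake_irb = FAKE_TM_IRB;	/* truncated modulo 2: stores 0 */
	n.fake_irb = FAKE_TM_IRB;	/* fits: stores 2 */

	printf("1-bit field: %u, 2-bit field: %u\n", o.fake_irb, n.fake_irb);
	return 0;
}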
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index b77ae519d79c..96bbe9d12a79 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1271,18 +1271,16 @@ ap_config_timeout(unsigned long ptr)
1271} 1271}
1272 1272
1273/** 1273/**
1274 * ap_schedule_poll_timer(): Schedule poll timer. 1274 * __ap_schedule_poll_timer(): Schedule poll timer.
1275 * 1275 *
1276 * Set up the timer to run the poll tasklet 1276 * Set up the timer to run the poll tasklet
1277 */ 1277 */
1278static inline void ap_schedule_poll_timer(void) 1278static inline void __ap_schedule_poll_timer(void)
1279{ 1279{
1280 ktime_t hr_time; 1280 ktime_t hr_time;
1281 1281
1282 spin_lock_bh(&ap_poll_timer_lock); 1282 spin_lock_bh(&ap_poll_timer_lock);
1283 if (ap_using_interrupts() || ap_suspend_flag) 1283 if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
1284 goto out;
1285 if (hrtimer_is_queued(&ap_poll_timer))
1286 goto out; 1284 goto out;
1287 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) { 1285 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
1288 hr_time = ktime_set(0, poll_timeout); 1286 hr_time = ktime_set(0, poll_timeout);
@@ -1294,6 +1292,18 @@ out:
1294} 1292}
1295 1293
1296/** 1294/**
1295 * ap_schedule_poll_timer(): Schedule poll timer.
1296 *
1297 * Set up the timer to run the poll tasklet
1298 */
1299static inline void ap_schedule_poll_timer(void)
1300{
1301 if (ap_using_interrupts())
1302 return;
1303 __ap_schedule_poll_timer();
1304}
1305
1306/**
1297 * ap_poll_read(): Receive pending reply messages from an AP device. 1307 * ap_poll_read(): Receive pending reply messages from an AP device.
1298 * @ap_dev: pointer to the AP device 1308 * @ap_dev: pointer to the AP device
1299 * @flags: pointer to control flags, bit 2^0 is set if another poll is 1309 * @flags: pointer to control flags, bit 2^0 is set if another poll is
@@ -1374,8 +1384,9 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
1374 *flags |= 1; 1384 *flags |= 1;
1375 *flags |= 2; 1385 *flags |= 2;
1376 break; 1386 break;
1377 case AP_RESPONSE_Q_FULL:
1378 case AP_RESPONSE_RESET_IN_PROGRESS: 1387 case AP_RESPONSE_RESET_IN_PROGRESS:
1388 __ap_schedule_poll_timer();
1389 case AP_RESPONSE_Q_FULL:
1379 *flags |= 2; 1390 *flags |= 2;
1380 break; 1391 break;
1381 case AP_RESPONSE_MESSAGE_TOO_BIG: 1392 case AP_RESPONSE_MESSAGE_TOO_BIG:
@@ -1541,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev)
1541 rc = ap_init_queue(ap_dev->qid); 1552 rc = ap_init_queue(ap_dev->qid);
1542 if (rc == -ENODEV) 1553 if (rc == -ENODEV)
1543 ap_dev->unregistered = 1; 1554 ap_dev->unregistered = 1;
1555 else
1556 __ap_schedule_poll_timer();
1544} 1557}
1545 1558
1546static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) 1559static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index fa80ba1f0344..9b66d2d1809b 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -4,7 +4,7 @@ menu "S/390 network device drivers"
4config LCS 4config LCS
5 def_tristate m 5 def_tristate m
6 prompt "Lan Channel Station Interface" 6 prompt "Lan Channel Station Interface"
7 depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI) 7 depends on CCW && NETDEVICES && (ETHERNET || TR || FDDI)
8 help 8 help
9 Select this option if you want to use LCS networking on IBM System z. 9 Select this option if you want to use LCS networking on IBM System z.
10 This device driver supports Token Ring (IEEE 802.5), 10 This device driver supports Token Ring (IEEE 802.5),
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c28713da1ec5..863fc2197155 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -50,7 +50,7 @@
50#include "lcs.h" 50#include "lcs.h"
51 51
52 52
53#if !defined(CONFIG_NET_ETHERNET) && \ 53#if !defined(CONFIG_ETHERNET) && \
54 !defined(CONFIG_TR) && !defined(CONFIG_FDDI) 54 !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
55#error Cannot compile lcs.c without some net devices switched on. 55#error Cannot compile lcs.c without some net devices switched on.
56#endif 56#endif
@@ -1634,7 +1634,7 @@ lcs_startlan_auto(struct lcs_card *card)
1634 int rc; 1634 int rc;
1635 1635
1636 LCS_DBF_TEXT(2, trace, "strtauto"); 1636 LCS_DBF_TEXT(2, trace, "strtauto");
1637#ifdef CONFIG_NET_ETHERNET 1637#ifdef CONFIG_ETHERNET
1638 card->lan_type = LCS_FRAME_TYPE_ENET; 1638 card->lan_type = LCS_FRAME_TYPE_ENET;
1639 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); 1639 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1640 if (rc == 0) 1640 if (rc == 0)
@@ -2166,7 +2166,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2166 goto netdev_out; 2166 goto netdev_out;
2167 } 2167 }
2168 switch (card->lan_type) { 2168 switch (card->lan_type) {
2169#ifdef CONFIG_NET_ETHERNET 2169#ifdef CONFIG_ETHERNET
2170 case LCS_FRAME_TYPE_ENET: 2170 case LCS_FRAME_TYPE_ENET:
2171 card->lan_type_trans = eth_type_trans; 2171 card->lan_type_trans = eth_type_trans;
2172 dev = alloc_etherdev(0); 2172 dev = alloc_etherdev(0);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 3251333a23df..b6a6356d09b3 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1994,6 +1994,8 @@ static struct net_device *netiucv_init_netdevice(char *username)
1994 netiucv_setup_netdevice); 1994 netiucv_setup_netdevice);
1995 if (!dev) 1995 if (!dev)
1996 return NULL; 1996 return NULL;
1997 if (dev_alloc_name(dev, dev->name) < 0)
1998 goto out_netdev;
1997 1999
1998 privptr = netdev_priv(dev); 2000 privptr = netdev_priv(dev);
1999 privptr->fsm = init_fsm("netiucvdev", dev_state_names, 2001 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index b77c65ed1381..4abc79d3963f 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -236,8 +236,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
236#define QETH_IN_BUF_COUNT_MAX 128 236#define QETH_IN_BUF_COUNT_MAX 128
237#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12) 237#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
238#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \ 238#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
239 ((card)->ssqd.qdioac1 & AC1_SIGA_INPUT_NEEDED ? 1 : \ 239 ((card)->qdio.in_buf_pool.buf_count / 2)
240 ((card)->qdio.in_buf_pool.buf_count / 2))
241 240
242/* buffers we have to be behind before we get a PCI */ 241/* buffers we have to be behind before we get a PCI */
243#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1) 242#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 81534437373a..fff57de78943 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -881,7 +881,6 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread);
881void qeth_schedule_recovery(struct qeth_card *card) 881void qeth_schedule_recovery(struct qeth_card *card)
882{ 882{
883 QETH_CARD_TEXT(card, 2, "startrec"); 883 QETH_CARD_TEXT(card, 2, "startrec");
884 WARN_ON(1);
885 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) 884 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
886 schedule_work(&card->kernel_thread_starter); 885 schedule_work(&card->kernel_thread_starter);
887} 886}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e4c1176ee25b..4d5307ddbe55 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2756,11 +2756,13 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
2756 struct neighbour *n = NULL; 2756 struct neighbour *n = NULL;
2757 struct dst_entry *dst; 2757 struct dst_entry *dst;
2758 2758
2759 rcu_read_lock();
2759 dst = skb_dst(skb); 2760 dst = skb_dst(skb);
2760 if (dst) 2761 if (dst)
2761 n = dst_get_neighbour(dst); 2762 n = dst_get_neighbour(dst);
2762 if (n) { 2763 if (n) {
2763 cast_type = n->type; 2764 cast_type = n->type;
2765 rcu_read_unlock();
2764 if ((cast_type == RTN_BROADCAST) || 2766 if ((cast_type == RTN_BROADCAST) ||
2765 (cast_type == RTN_MULTICAST) || 2767 (cast_type == RTN_MULTICAST) ||
2766 (cast_type == RTN_ANYCAST)) 2768 (cast_type == RTN_ANYCAST))
@@ -2768,6 +2770,8 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
2768 else 2770 else
2769 return RTN_UNSPEC; 2771 return RTN_UNSPEC;
2770 } 2772 }
2773 rcu_read_unlock();
2774
2771 /* try something else */ 2775 /* try something else */
2772 if (skb->protocol == ETH_P_IPV6) 2776 if (skb->protocol == ETH_P_IPV6)
2773 return (skb_network_header(skb)[24] == 0xff) ? 2777 return (skb_network_header(skb)[24] == 0xff) ?
@@ -2847,6 +2851,8 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2847 } 2851 }
2848 2852
2849 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr); 2853 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
2854
2855 rcu_read_lock();
2850 dst = skb_dst(skb); 2856 dst = skb_dst(skb);
2851 if (dst) 2857 if (dst)
2852 n = dst_get_neighbour(dst); 2858 n = dst_get_neighbour(dst);
@@ -2893,6 +2899,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2893 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU; 2899 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
2894 } 2900 }
2895 } 2901 }
2902 rcu_read_unlock();
2896} 2903}
2897 2904
2898static inline void qeth_l3_hdr_csum(struct qeth_card *card, 2905static inline void qeth_l3_hdr_csum(struct qeth_card *card,
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 0ea2fbfe0e99..d979bb26522f 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -335,10 +335,10 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
335 QETH_IN_BUF_COUNT_MAX) 335 QETH_IN_BUF_COUNT_MAX)
336 qeth_realloc_buffer_pool(card, 336 qeth_realloc_buffer_pool(card,
337 QETH_IN_BUF_COUNT_MAX); 337 QETH_IN_BUF_COUNT_MAX);
338 break;
339 } else 338 } else
340 rc = -EPERM; 339 rc = -EPERM;
341 default: /* fall through */ 340 break;
341 default:
342 rc = -EINVAL; 342 rc = -EINVAL;
343 } 343 }
344out: 344out:
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 5f94d22c491e..542668292900 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -233,13 +233,9 @@ int bbc_i2c_write_buf(struct bbc_i2c_client *client,
233 int ret = 0; 233 int ret = 0;
234 234
235 while (len > 0) { 235 while (len > 0) {
236 int err = bbc_i2c_writeb(client, *buf, off); 236 ret = bbc_i2c_writeb(client, *buf, off);
237 237 if (ret < 0)
238 if (err < 0) {
239 ret = err;
240 break; 238 break;
241 }
242
243 len--; 239 len--;
244 buf++; 240 buf++;
245 off++; 241 off++;
@@ -253,11 +249,9 @@ int bbc_i2c_read_buf(struct bbc_i2c_client *client,
253 int ret = 0; 249 int ret = 0;
254 250
255 while (len > 0) { 251 while (len > 0) {
256 int err = bbc_i2c_readb(client, buf, off); 252 ret = bbc_i2c_readb(client, buf, off);
257 if (err < 0) { 253 if (ret < 0)
258 ret = err;
259 break; 254 break;
260 }
261 len--; 255 len--;
262 buf++; 256 buf++;
263 off++; 257 off++;
@@ -422,17 +416,6 @@ static struct platform_driver bbc_i2c_driver = {
422 .remove = __devexit_p(bbc_i2c_remove), 416 .remove = __devexit_p(bbc_i2c_remove),
423}; 417};
424 418
425static int __init bbc_i2c_init(void) 419module_platform_driver(bbc_i2c_driver);
426{
427 return platform_driver_register(&bbc_i2c_driver);
428}
429
430static void __exit bbc_i2c_exit(void)
431{
432 platform_driver_unregister(&bbc_i2c_driver);
433}
434
435module_init(bbc_i2c_init);
436module_exit(bbc_i2c_exit);
437 420
438MODULE_LICENSE("GPL"); 421MODULE_LICENSE("GPL");
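This bbc_i2c change, and the identical ones to display7seg, envctrl, flash and uctrl that follow, replace the hand-written registration boilerplate with module_platform_driver(). The macro generates essentially the code being deleted; roughly, and simplified from the platform_device header of this era (treat the exact generated identifiers as illustrative):

/* Approximate expansion of module_platform_driver(bbc_i2c_driver). */
static int __init bbc_i2c_driver_init(void)
{
	return platform_driver_register(&bbc_i2c_driver);
}
module_init(bbc_i2c_driver_init);

static void __exit bbc_i2c_driver_exit(void)
{
	platform_driver_unregister(&bbc_i2c_driver);
}
module_exit(bbc_i2c_driver_exit);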
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 965a1fccd66a..4b9939726c34 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -275,15 +275,4 @@ static struct platform_driver d7s_driver = {
275 .remove = __devexit_p(d7s_remove), 275 .remove = __devexit_p(d7s_remove),
276}; 276};
277 277
278static int __init d7s_init(void) 278module_platform_driver(d7s_driver);
279{
280 return platform_driver_register(&d7s_driver);
281}
282
283static void __exit d7s_exit(void)
284{
285 platform_driver_unregister(&d7s_driver);
286}
287
288module_init(d7s_init);
289module_exit(d7s_exit);
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index be7b4e56154f..339fd6f65eda 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1138,16 +1138,6 @@ static struct platform_driver envctrl_driver = {
1138 .remove = __devexit_p(envctrl_remove), 1138 .remove = __devexit_p(envctrl_remove),
1139}; 1139};
1140 1140
1141static int __init envctrl_init(void) 1141module_platform_driver(envctrl_driver);
1142{
1143 return platform_driver_register(&envctrl_driver);
1144}
1145
1146static void __exit envctrl_exit(void)
1147{
1148 platform_driver_unregister(&envctrl_driver);
1149}
1150 1142
1151module_init(envctrl_init);
1152module_exit(envctrl_exit);
1153MODULE_LICENSE("GPL"); 1143MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 73dd4e7afaaa..826157f38694 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -216,16 +216,6 @@ static struct platform_driver flash_driver = {
216 .remove = __devexit_p(flash_remove), 216 .remove = __devexit_p(flash_remove),
217}; 217};
218 218
219static int __init flash_init(void) 219module_platform_driver(flash_driver);
220{
221 return platform_driver_register(&flash_driver);
222}
223
224static void __exit flash_cleanup(void)
225{
226 platform_driver_unregister(&flash_driver);
227}
228 220
229module_init(flash_init);
230module_exit(flash_cleanup);
231MODULE_LICENSE("GPL"); 221MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index ebce9639a26a..0b31658ccde5 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -435,16 +435,6 @@ static struct platform_driver uctrl_driver = {
435}; 435};
436 436
437 437
438static int __init uctrl_init(void) 438module_platform_driver(uctrl_driver);
439{
440 return platform_driver_register(&uctrl_driver);
441}
442
443static void __exit uctrl_exit(void)
444{
445 platform_driver_unregister(&uctrl_driver);
446}
447 439
448module_init(uctrl_init);
449module_exit(uctrl_exit);
450MODULE_LICENSE("GPL"); 440MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4aa76d6f11df..705e13e470af 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -38,6 +38,7 @@
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/moduleparam.h> 39#include <linux/moduleparam.h>
40#include <linux/pci.h> 40#include <linux/pci.h>
41#include <linux/pci-aspm.h>
41#include <linux/slab.h> 42#include <linux/slab.h>
42#include <linux/mutex.h> 43#include <linux/mutex.h>
43#include <linux/spinlock.h> 44#include <linux/spinlock.h>
@@ -1109,6 +1110,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1109 unique_id++; 1110 unique_id++;
1110 } 1111 }
1111 1112
1113 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
1114 PCIE_LINK_STATE_CLKPM);
1115
1112 error = pci_enable_device(pdev); 1116 error = pci_enable_device(pdev);
1113 if (error) 1117 if (error)
1114 goto out; 1118 goto out;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index e76107b2ade3..865d452542be 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -23,6 +23,7 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <linux/pci-aspm.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/delay.h> 29#include <linux/delay.h>
@@ -3922,6 +3923,10 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
3922 dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); 3923 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
3923 return -ENODEV; 3924 return -ENODEV;
3924 } 3925 }
3926
3927 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
3928 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
3929
3925 err = pci_enable_device(h->pdev); 3930 err = pci_enable_device(h->pdev);
3926 if (err) { 3931 if (err) {
3927 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); 3932 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 8889b1babcac..4e041f6d808c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2802,6 +2802,11 @@ _scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc)
 
 	if (ioc->is_driver_loading)
 		return;
+
+	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+	if (!fw_event)
+		return;
+
 	fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES;
 	fw_event->ioc = ioc;
 	_scsih_fw_event_add(ioc, fw_event);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 06bc26554a67..f85cfa6c47b5 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1409,6 +1409,8 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 
 	blk_start_request(req);
 
+	scmd_printk(KERN_INFO, cmd, "killing request\n");
+
 	sdev = cmd->device;
 	starget = scsi_target(sdev);
 	shost = sdev->host;
@@ -1490,7 +1492,6 @@ static void scsi_request_fn(struct request_queue *q)
 	struct request *req;
 
 	if (!sdev) {
-		printk("scsi: killing requests for dead queue\n");
 		while ((req = blk_peek_request(q)) != NULL)
 			scsi_kill_request(req, q);
 		return;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 72273a0e5666..b3c6d957fbd8 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -319,11 +319,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	return sdev;
 
 out_device_destroy:
-	scsi_device_set_state(sdev, SDEV_DEL);
-	transport_destroy_device(&sdev->sdev_gendev);
-	put_device(&sdev->sdev_dev);
-	scsi_free_queue(sdev->request_queue);
-	put_device(&sdev->sdev_gendev);
+	__scsi_remove_device(sdev);
 out:
 	if (display_failure_msg)
 		printk(ALLOC_FAILURE_MSG, __func__);
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 24e6cec0ae8d..67e272ab1623 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -7,3 +7,11 @@ obj-$(CONFIG_HAVE_CLK) += clk/
 obj-$(CONFIG_MAPLE)		+= maple/
 obj-$(CONFIG_SUPERHYWAY)	+= superhyway/
 obj-$(CONFIG_GENERIC_GPIO)	+= pfc.o
+
+#
+# For the moment we only use this framework for ARM-based SH/R-Mobile
+# platforms and generic SH. SH-based SH-Mobile platforms are still using
+# an older framework that is pending up-porting, at which point this
+# special casing can go away.
+#
+obj-$(CONFIG_SUPERH)$(CONFIG_ARCH_SHMOBILE)	+= pm_runtime.o
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index dc8d022c07a1..db257a35e71a 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -25,7 +25,6 @@
 #include <linux/seq_file.h>
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/debugfs.h>
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
 #include <linux/sh_clk.h>
@@ -173,6 +172,26 @@ long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
 	return clk_rate_round_helper(&div_range_round);
 }
 
+static long clk_rate_mult_range_iter(unsigned int pos,
+				     struct clk_rate_round_data *rounder)
+{
+	return clk_get_rate(rounder->arg) * pos;
+}
+
+long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
+			       unsigned int mult_max, unsigned long rate)
+{
+	struct clk_rate_round_data mult_range_round = {
+		.min	= mult_min,
+		.max	= mult_max,
+		.func	= clk_rate_mult_range_iter,
+		.arg	= clk_get_parent(clk),
+		.rate	= rate,
+	};
+
+	return clk_rate_round_helper(&mult_range_round);
+}
+
 int clk_rate_table_find(struct clk *clk,
 			struct cpufreq_frequency_table *freq_table,
 			unsigned long rate)
@@ -205,9 +224,6 @@ int clk_reparent(struct clk *child, struct clk *parent)
 	list_add(&child->sibling, &parent->children);
 	child->parent = parent;
 
-	/* now do the debugfs renaming to reattach the child
-	   to the proper parent */
-
 	return 0;
 }
 
@@ -665,89 +681,6 @@
 subsys_initcall(clk_syscore_init);
 #endif
 
-/*
- * debugfs support to trace clock tree hierarchy and attributes
- */
-static struct dentry *clk_debugfs_root;
-
-static int clk_debugfs_register_one(struct clk *c)
-{
-	int err;
-	struct dentry *d;
-	struct clk *pa = c->parent;
-	char s[255];
-	char *p = s;
-
-	p += sprintf(p, "%p", c);
-	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
-	if (!d)
-		return -ENOMEM;
-	c->dentry = d;
-
-	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
-	if (!d) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
-	if (!d) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
-	if (!d) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-	return 0;
-
-err_out:
-	debugfs_remove_recursive(c->dentry);
-	return err;
-}
-
-static int clk_debugfs_register(struct clk *c)
-{
-	int err;
-	struct clk *pa = c->parent;
-
-	if (pa && !pa->dentry) {
-		err = clk_debugfs_register(pa);
-		if (err)
-			return err;
-	}
-
-	if (!c->dentry) {
-		err = clk_debugfs_register_one(c);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
-static int __init clk_debugfs_init(void)
-{
-	struct clk *c;
-	struct dentry *d;
-	int err;
-
-	d = debugfs_create_dir("clock", NULL);
-	if (!d)
-		return -ENOMEM;
-	clk_debugfs_root = d;
-
-	list_for_each_entry(c, &clock_list, node) {
-		err = clk_debugfs_register(c);
-		if (err)
-			goto err_out;
-	}
-	return 0;
-err_out:
-	debugfs_remove_recursive(clk_debugfs_root);
-	return err;
-}
-late_initcall(clk_debugfs_init);
-
 static int __init clk_late_init(void)
 {
 	unsigned long flags;
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/drivers/sh/pm_runtime.c
index bd5c6a3b8c55..afe9282629b9 100644
--- a/arch/arm/mach-shmobile/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -1,7 +1,5 @@
 /*
- * arch/arm/mach-shmobile/pm_runtime.c
- *
- * Runtime PM support code for SuperH Mobile ARM
+ * Runtime PM support code
  *
  * Copyright (C) 2009-2010 Magnus Damm
  *
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a1fd73df5416..8ba4510a9519 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -199,7 +199,7 @@ config SPI_FSL_LIB
 	depends on FSL_SOC
 
 config SPI_FSL_SPI
-	tristate "Freescale SPI controller"
+	bool "Freescale SPI controller"
 	depends on FSL_SOC
 	select SPI_FSL_LIB
 	help
@@ -208,7 +208,7 @@ config SPI_FSL_SPI
 	  MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
 
 config SPI_FSL_ESPI
-	tristate "Freescale eSPI controller"
+	bool "Freescale eSPI controller"
 	depends on FSL_SOC
 	select SPI_FSL_LIB
 	help
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 024b48aed5ca..acc88b4d2869 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -13,6 +13,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 79665e2e6ec5..16d6a839c7fa 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -907,7 +907,7 @@ static void atmel_spi_cleanup(struct spi_device *spi)
 
 /*-------------------------------------------------------------------------*/
 
-static int __init atmel_spi_probe(struct platform_device *pdev)
+static int __devinit atmel_spi_probe(struct platform_device *pdev)
 {
 	struct resource		*regs;
 	int			irq;
@@ -1003,7 +1003,7 @@ out_free:
 	return ret;
 }
 
-static int __exit atmel_spi_remove(struct platform_device *pdev)
+static int __devexit atmel_spi_remove(struct platform_device *pdev)
 {
 	struct spi_master	*master = platform_get_drvdata(pdev);
 	struct atmel_spi	*as = spi_master_get_devdata(master);
@@ -1072,6 +1072,7 @@ static struct platform_driver atmel_spi_driver = {
 	},
 	.suspend	= atmel_spi_suspend,
 	.resume		= atmel_spi_resume,
+	.probe		= atmel_spi_probe,
 	.remove		= __exit_p(atmel_spi_remove),
 };
 module_platform_driver(atmel_spi_driver);
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index e093d3ec41ba..0094c645ff0d 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
 	spi_bitbang_cleanup(spi);
 }
 
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
 {
 	int value;
 
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
 	return value;
 }
 
-static int __init
+static int __devinit
 spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
 		 u16 *res_flags)
 {
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index e763254741c2..182e9c873822 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -8,6 +8,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
@@ -426,7 +427,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
 		goto err_clk;
 	}
 
-	mfp_set_groupg(&pdev->dev);
+	mfp_set_groupg(&pdev->dev, NULL);
 	nuc900_init_spi(hw);
 
 	err = spi_bitbang_start(&hw->bitbang);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index f103e470cb63..5559b2299198 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2184,6 +2184,12 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
 		goto err_clk_prep;
 	}
 
+	status = clk_enable(pl022->clk);
+	if (status) {
+		dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
+		goto err_no_clk_en;
+	}
+
 	/* Disable SSP */
 	writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
 	       SSP_CR1(pl022->virtbase));
@@ -2237,6 +2243,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
 
 	free_irq(adev->irq[0], pl022);
  err_no_irq:
+	clk_disable(pl022->clk);
+ err_no_clk_en:
 	clk_unprepare(pl022->clk);
  err_clk_prep:
 	clk_put(pl022->clk);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 84c934c0a545..520e8286db28 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -517,10 +517,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
 
 static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
 {
-	ssb_pcicore_fix_sprom_core_index(pc);
+	struct ssb_device *pdev = pc->dev;
+	struct ssb_bus *bus = pdev->bus;
+
+	if (bus->bustype == SSB_BUSTYPE_PCI)
+		ssb_pcicore_fix_sprom_core_index(pc);
 
 	/* Disable PCI interrupts. */
-	ssb_write32(pc->dev, SSB_INTVEC, 0);
+	ssb_write32(pdev, SSB_INTVEC, 0);
 
 	/* Additional PCIe always once-executed workarounds */
 	if (pc->dev->id.coreid == SSB_DEV_PCIE) {
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 21d8c1c16cd8..5e78c77d5a08 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -671,7 +671,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
 	}
 
 	insns =
-	    kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL);
+	    kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL);
 	if (!insns) {
 		DPRINTK("kmalloc failed\n");
 		ret = -ENOMEM;
@@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
 	return ret;
 }
 
-static void comedi_unmap(struct vm_area_struct *area)
+
+static void comedi_vm_open(struct vm_area_struct *area)
+{
+	struct comedi_async *async;
+	struct comedi_device *dev;
+
+	async = area->vm_private_data;
+	dev = async->subdevice->device;
+
+	mutex_lock(&dev->mutex);
+	async->mmap_count++;
+	mutex_unlock(&dev->mutex);
+}
+
+static void comedi_vm_close(struct vm_area_struct *area)
 {
 	struct comedi_async *async;
 	struct comedi_device *dev;
@@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area)
 }
 
 static struct vm_operations_struct comedi_vm_ops = {
-	.close = comedi_unmap,
+	.open = comedi_vm_open,
+	.close = comedi_vm_close,
 };
 
 static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	const unsigned minor = iminor(file->f_dentry->d_inode);
-	struct comedi_device_file_info *dev_file_info =
-	    comedi_get_device_file_info(minor);
-	struct comedi_device *dev = dev_file_info->device;
 	struct comedi_async *async = NULL;
 	unsigned long start = vma->vm_start;
 	unsigned long size;
@@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
 	int i;
 	int retval;
 	struct comedi_subdevice *s;
+	struct comedi_device_file_info *dev_file_info;
+	struct comedi_device *dev;
+
+	dev_file_info = comedi_get_device_file_info(minor);
+	if (dev_file_info == NULL)
+		return -ENODEV;
+	dev = dev_file_info->device;
+	if (dev == NULL)
+		return -ENODEV;
 
 	mutex_lock(&dev->mutex);
 	if (!dev->attached) {
@@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
 {
 	unsigned int mask = 0;
 	const unsigned minor = iminor(file->f_dentry->d_inode);
-	struct comedi_device_file_info *dev_file_info =
-	    comedi_get_device_file_info(minor);
-	struct comedi_device *dev = dev_file_info->device;
 	struct comedi_subdevice *read_subdev;
 	struct comedi_subdevice *write_subdev;
+	struct comedi_device_file_info *dev_file_info;
+	struct comedi_device *dev;
+	dev_file_info = comedi_get_device_file_info(minor);
+
+	if (dev_file_info == NULL)
+		return -ENODEV;
+	dev = dev_file_info->device;
+	if (dev == NULL)
+		return -ENODEV;
 
 	mutex_lock(&dev->mutex);
 	if (!dev->attached) {
@@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
 	int n, m, count = 0, retval = 0;
 	DECLARE_WAITQUEUE(wait, current);
 	const unsigned minor = iminor(file->f_dentry->d_inode);
-	struct comedi_device_file_info *dev_file_info =
-	    comedi_get_device_file_info(minor);
-	struct comedi_device *dev = dev_file_info->device;
+	struct comedi_device_file_info *dev_file_info;
+	struct comedi_device *dev;
+	dev_file_info = comedi_get_device_file_info(minor);
+
+	if (dev_file_info == NULL)
+		return -ENODEV;
+	dev = dev_file_info->device;
+	if (dev == NULL)
+		return -ENODEV;
 
 	if (!dev->attached) {
 		DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
 				retval = -EAGAIN;
 				break;
 			}
+			schedule();
 			if (signal_pending(current)) {
 				retval = -ERESTARTSYS;
 				break;
 			}
-			schedule();
 			if (!s->busy)
 				break;
 			if (s->busy != file) {
@@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
 	int n, m, count = 0, retval = 0;
 	DECLARE_WAITQUEUE(wait, current);
 	const unsigned minor = iminor(file->f_dentry->d_inode);
-	struct comedi_device_file_info *dev_file_info =
-	    comedi_get_device_file_info(minor);
-	struct comedi_device *dev = dev_file_info->device;
+	struct comedi_device_file_info *dev_file_info;
+	struct comedi_device *dev;
+	dev_file_info = comedi_get_device_file_info(minor);
+
+	if (dev_file_info == NULL)
+		return -ENODEV;
+	dev = dev_file_info->device;
+	if (dev == NULL)
+		return -ENODEV;
 
 	if (!dev->attached) {
 		DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
 				retval = -EAGAIN;
 				break;
 			}
+			schedule();
 			if (signal_pending(current)) {
 				retval = -ERESTARTSYS;
 				break;
 			}
-			schedule();
 			if (!s->busy) {
 				retval = 0;
 				break;
@@ -1885,11 +1924,17 @@ ok:
 static int comedi_close(struct inode *inode, struct file *file)
 {
 	const unsigned minor = iminor(inode);
-	struct comedi_device_file_info *dev_file_info =
-	    comedi_get_device_file_info(minor);
-	struct comedi_device *dev = dev_file_info->device;
 	struct comedi_subdevice *s = NULL;
 	int i;
+	struct comedi_device_file_info *dev_file_info;
+	struct comedi_device *dev;
+	dev_file_info = comedi_get_device_file_info(minor);
+
+	if (dev_file_info == NULL)
+		return -ENODEV;
+	dev = dev_file_info->device;
+	if (dev == NULL)
+		return -ENODEV;
 
 	mutex_lock(&dev->mutex);
 
@@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file)
 static int comedi_fasync(int fd, struct file *file, int on)
 {
 	const unsigned minor = iminor(file->f_dentry->d_inode);
-	struct comedi_device_file_info *dev_file_info =
-	    comedi_get_device_file_info(minor);
+	struct comedi_device_file_info *dev_file_info;
+	struct comedi_device *dev;
+	dev_file_info = comedi_get_device_file_info(minor);
 
-	struct comedi_device *dev = dev_file_info->device;
+	if (dev_file_info == NULL)
+		return -ENODEV;
+	dev = dev_file_info->device;
+	if (dev == NULL)
+		return -ENODEV;
 
 	return fasync_helper(fd, file, on, &dev->async_queue);
 }
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index a8fea9a91733..6144afb8cbaa 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.6"
 #define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
 #define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"
 /*
@@ -25,7 +25,7 @@ Driver: usbduxsigma
 Description: University of Stirling USB DAQ & INCITE Technology Limited
 Devices: [ITL] USB-DUX (usbduxsigma.o)
 Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 21 Jul 2011
+Updated: 8 Nov 2011
 Status: testing
 */
 /*
@@ -44,6 +44,7 @@ Status: testing
  * 0.3: proper vendor ID and driver name
  * 0.4: fixed D/A voltage range
  * 0.5: various bug fixes, health check at startup
+ * 0.6: corrected wrong input range
  */
 
 /* generates loads of debug info */
@@ -175,7 +176,7 @@ Status: testing
 /* comedi constants */
 static const struct comedi_lrange range_usbdux_ai_range = { 1, {
 							       BIP_RANGE
-							       (2.65)
+							       (2.65/2.0)
 							       }
 };
 
diff --git a/drivers/staging/et131x/Kconfig b/drivers/staging/et131x/Kconfig
index 9e1864c6dfd0..8190f2aaf53b 100644
--- a/drivers/staging/et131x/Kconfig
+++ b/drivers/staging/et131x/Kconfig
@@ -1,6 +1,7 @@
 config ET131X
 	tristate "Agere ET-1310 Gigabit Ethernet support"
-	depends on PCI
+	depends on PCI && NET && NETDEVICES
+	select PHYLIB
 	default n
 	---help---
 	  This driver supports Agere ET-1310 ethernet adapters.
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index f5f44a02456f..0c1c6ca8c379 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -4469,6 +4469,12 @@ static int et131x_resume(struct device *dev)
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
+#define ET131X_PM_OPS (&et131x_pm_ops)
+#else
+#define ET131X_PM_OPS NULL
+#endif
+
 /* ISR functions */
 
 /**
@@ -5470,12 +5476,6 @@ err_out:
 	return result;
 }
 
-static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
-#define ET131X_PM_OPS (&et131x_pm_ops)
-#else
-#define ET131X_PM_OPS NULL
-#endif
-
 static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
 	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
 	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 326e967d54ef..aec9311b108c 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -242,19 +242,26 @@ static const struct file_operations iio_event_chrdev_fileops = {
 
 static int iio_event_getfd(struct iio_dev *indio_dev)
 {
-	if (indio_dev->event_interface == NULL)
+	struct iio_event_interface *ev_int = indio_dev->event_interface;
+	int fd;
+
+	if (ev_int == NULL)
 		return -ENODEV;
 
-	mutex_lock(&indio_dev->event_interface->event_list_lock);
-	if (test_and_set_bit(IIO_BUSY_BIT_POS,
-			     &indio_dev->event_interface->flags)) {
-		mutex_unlock(&indio_dev->event_interface->event_list_lock);
+	mutex_lock(&ev_int->event_list_lock);
+	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+		mutex_unlock(&ev_int->event_list_lock);
 		return -EBUSY;
 	}
-	mutex_unlock(&indio_dev->event_interface->event_list_lock);
-	return anon_inode_getfd("iio:event",
-				&iio_event_chrdev_fileops,
-				indio_dev->event_interface, O_RDONLY);
+	mutex_unlock(&ev_int->event_list_lock);
+	fd = anon_inode_getfd("iio:event",
+			      &iio_event_chrdev_fileops, ev_int, O_RDONLY);
+	if (fd < 0) {
+		mutex_lock(&ev_int->event_list_lock);
+		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+		mutex_unlock(&ev_int->event_list_lock);
+	}
+	return fd;
 }
 
 static int __init iio_init(void)
diff --git a/drivers/staging/media/as102/as102_drv.c b/drivers/staging/media/as102/as102_drv.c
index d335c7d6fa0f..828526d4c289 100644
--- a/drivers/staging/media/as102/as102_drv.c
+++ b/drivers/staging/media/as102/as102_drv.c
@@ -32,8 +32,8 @@
 #include "as102_fw.h"
 #include "dvbdev.h"
 
-int debug;
-module_param_named(debug, debug, int, 0644);
+int as102_debug;
+module_param_named(debug, as102_debug, int, 0644);
 MODULE_PARM_DESC(debug, "Turn on/off debugging (default: off)");
 
 int dual_tuner;
diff --git a/drivers/staging/media/as102/as102_drv.h b/drivers/staging/media/as102/as102_drv.h
index bcda635b5a99..fd33f5a12dcc 100644
--- a/drivers/staging/media/as102/as102_drv.h
+++ b/drivers/staging/media/as102/as102_drv.h
@@ -37,7 +37,8 @@ extern struct spi_driver as102_spi_driver;
 #define DRIVER_FULL_NAME "Abilis Systems as10x usb driver"
 #define DRIVER_NAME "as10x_usb"
 
-extern int debug;
+extern int as102_debug;
+#define debug as102_debug
 
 #define dprintk(debug, args...) \
 	do { if (debug) {	\
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index b445cd63f901..2542c3743904 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -275,7 +275,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
-			hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset));
+			hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + fs->page_offset));
 			hw_buffer.s.size = fs->size;
 			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
 		}
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index fb2e89c3056c..5385da2e9cdb 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
 	{USB_DEVICE(0x0DF6, 0x0045)},
 	{USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
 	{USB_DEVICE(0x0DF6, 0x004B)},
+	{USB_DEVICE(0x0DF6, 0x005D)},
 	{USB_DEVICE(0x0DF6, 0x0063)},
 	/* Sweex */
 	{USB_DEVICE(0x177F, 0x0154)},
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 480b0ed2e4de..115635f95024 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -1021,6 +1021,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci,
 	th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
 	if (IS_ERR(th)) {
 		printk(KERN_ERR "Unable to start the device-scanning thread\n");
+		complete(&dev->scanning_done);
 		quiesce_and_remove_host(dev);
 		err = PTR_ERR(th);
 		goto errout;
diff --git a/drivers/staging/slicoss/Kconfig b/drivers/staging/slicoss/Kconfig
index 5cde96b2e6e1..5c2a15b42dfe 100644
--- a/drivers/staging/slicoss/Kconfig
+++ b/drivers/staging/slicoss/Kconfig
@@ -1,6 +1,6 @@
 config SLICOSS
 	tristate "Alacritech Gigabit IS-NIC support"
-	depends on PCI && X86
+	depends on PCI && X86 && NET
 	default n
 	help
 	  This driver supports Alacritech's IS-NIC gigabit ethernet cards.
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 3d1279c424a8..7eb56178fb64 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -54,6 +54,7 @@
 
 /* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
 #define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS	4
 
 /* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
 #define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@ static s8 get_clk_type(u8 id)
  */
 void dsp_clk_exit(void)
 {
+	int i;
+
 	dsp_clock_disable_all(dsp_clocks);
 
+	for (i = 0; i < DM_TIMER_CLOCKS; i++)
+		omap_dm_timer_free(timer[i]);
+
 	clk_put(iva2_clk);
 	clk_put(ssi.sst_fck);
 	clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@ void dsp_clk_exit(void)
 void dsp_clk_init(void)
 {
 	static struct platform_device dspbridge_device;
+	int i, id;
 
 	dspbridge_device.dev.bus = &platform_bus_type;
 
+	for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+		timer[i] = omap_dm_timer_request_specific(id);
+
 	iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
 	if (IS_ERR(iva2_clk))
 		dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
 		clk_enable(iva2_clk);
 		break;
 	case GPT_CLK:
-		timer[clk_id - 1] =
-				omap_dm_timer_request_specific(DMT_ID(clk_id));
+		status = omap_dm_timer_start(timer[clk_id - 1]);
 		break;
 #ifdef CONFIG_OMAP_MCBSP
 	case MCBSP_CLK:
@@ -281,7 +290,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
 		clk_disable(iva2_clk);
 		break;
 	case GPT_CLK:
-		omap_dm_timer_free(timer[clk_id - 1]);
+		status = omap_dm_timer_stop(timer[clk_id - 1]);
 		break;
 #ifdef CONFIG_OMAP_MCBSP
 	case MCBSP_CLK:
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index c43c7e3421c8..76cfc6edecd9 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -24,11 +24,7 @@
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
-
-#ifdef MODULE
 #include <linux/module.h>
-#endif
-
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/moduleparam.h>
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 09c44abb89e8..3872b8cccdcf 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 {
 	struct usbip_device *ud = &vdev->ud;
 	struct urb *urb;
+	unsigned long flags;
 
 	spin_lock(&vdev->priv_lock);
 	urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
@@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 
 	usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 	usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
 
@@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 {
 	struct vhci_unlink *unlink;
 	struct urb *urb;
+	unsigned long flags;
 
 	usbip_dump_header(pdu);
 
@@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 	urb->status = pdu->u.ret_unlink.status;
 	pr_info("urb->status %d\n", urb->status);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 	usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
 			     urb->status);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 0fd96c10271d..8599545cdf9e 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -614,13 +614,12 @@ int iscsit_add_reject(
 	hdr = (struct iscsi_reject *) cmd->pdu;
 	hdr->reason = reason;
 
-	cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
 	if (!cmd->buf_ptr) {
 		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
 		iscsit_release_cmd(cmd);
 		return -1;
 	}
-	memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
 	spin_lock_bh(&conn->cmd_lock);
 	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
 	hdr = (struct iscsi_reject *) cmd->pdu;
 	hdr->reason = reason;
 
-	cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
 	if (!cmd->buf_ptr) {
 		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
 		iscsit_release_cmd(cmd);
 		return -1;
 	}
-	memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
 	if (add_to_conn) {
 		spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1015,6 @@ done:
 			" non-existent or non-exported iSCSI LUN:"
 			" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
 		}
-		if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
-			return iscsit_add_reject_from_cmd(
-					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-					1, 1, buf, cmd);
-
 		send_check_condition = 1;
 		goto attach_cmd;
 	}
@@ -1044,6 +1037,8 @@ done:
 		 */
 		send_check_condition = 1;
 	} else {
+		cmd->data_length = cmd->se_cmd.data_length;
+
 		if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
 			return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1118,7 @@ attach_cmd:
 	 * the backend memory allocation.
 	 */
 	ret = transport_generic_new_cmd(&cmd->se_cmd);
-	if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+	if (ret < 0) {
 		immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
 		dump_immediate_data = 1;
 		goto after_immediate_data;
@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 
 	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
 	if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
-	    (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+	    (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
 		dump_unsolicited_data = 1;
 	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
 	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
 		if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
 			hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
-			hdr->residual_count = cpu_to_be32(cmd->residual_count);
+			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
 		} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
 			hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
-			hdr->residual_count = cpu_to_be32(cmd->residual_count);
+			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
 		}
 	}
 	hton24(hdr->dlength, datain.length);
@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
 	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
 	if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
 		hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
-		hdr->residual_count = cpu_to_be32(cmd->residual_count);
+		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
 	} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
 		hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
-		hdr->residual_count = cpu_to_be32(cmd->residual_count);
+		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
 	}
 	hdr->response = cmd->iscsi_response;
 	hdr->cmd_status = cmd->se_cmd.scsi_status;
@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
 	hdr = (struct iscsi_tm_rsp *) cmd->pdu;
 	memset(hdr, 0, ISCSI_HDR_LEN);
 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+	hdr->flags = ISCSI_FLAG_CMD_FINAL;
 	hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
 	hdr->itt = cpu_to_be32(cmd->init_task_tag);
 	cmd->stat_sn = conn->stat_sn++;
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index beb39469e7f1..1cd6ce373b83 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -30,9 +30,11 @@
 
 static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
 {
-	int j = DIV_ROUND_UP(len, 2);
+	int j = DIV_ROUND_UP(len, 2), rc;
 
-	hex2bin(dst, src, j);
+	rc = hex2bin(dst, src, j);
+	if (rc < 0)
+		pr_debug("CHAP string contains non hex digit symbols\n");
 
 	dst[j] = '\0';
 	return j;
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 3723d90d5ae5..f1a02dad05a0 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -398,7 +398,6 @@ struct iscsi_cmd {
 	u32			pdu_send_order;
 	/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
 	u32			pdu_start;
-	u32			residual_count;
 	/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
 	u32			seq_send_order;
 	/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
 	atomic_t		connection_exit;
 	atomic_t		connection_recovery;
 	atomic_t		connection_reinstatement;
-	atomic_t		connection_wait;
 	atomic_t		connection_wait_rcfr;
 	atomic_t		sleep_on_conn_wait_comp;
 	atomic_t		transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
 	atomic_t		session_reinstatement;
 	atomic_t		session_stop_active;
 	atomic_t		sleep_on_sess_wait_comp;
-	atomic_t		transport_wait_cmds;
 	/* connection list */
 	struct list_head	sess_conn_list;
 	struct list_head	cr_active_list;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index c4c68da3e500..101b1beb3bca 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
 	 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
 	 */
 	if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
-		if (se_cmd->se_cmd_flags &
-				SCF_SCSI_RESERVATION_CONFLICT) {
+		if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
 			cmd->i_state = ISTATE_SEND_STATUS;
 			spin_unlock_bh(&cmd->istate_lock);
 			iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index daad362a93ce..d734bdec24f9 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
 		pr_err("Could not allocate memory for session\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	iscsi_login_set_conn_values(sess, conn, pdu->cid);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
 		pr_err("idr_pre_get() for sess_idr failed\n");
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
-		return -1;
+		kfree(sess);
+		return -ENOMEM;
 	}
 	spin_lock(&sess_idr_lock);
 	idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
 		pr_err("Unable to allocate memory for"
 				" struct iscsi_sess_ops.\n");
-		return -1;
+		kfree(sess);
+		return -ENOMEM;
 	}
 
 	sess->se_sess = transport_init_session();
-	if (!sess->se_sess) {
+	if (IS_ERR(sess->se_sess)) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
-		return -1;
+		kfree(sess);
+		return -ENOMEM;
 	}
 
 	return 0;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 426cd4bf6a9a..98936cb7c294 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
 		return NULL;
 	}
 
-	login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+	login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
 	if (!login->req) {
 		pr_err("Unable to allocate memory for Login Request.\n");
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
 		goto out;
 	}
-	memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
 
 	login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
 	if (!login->req_buf) {
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 3df1c9b8ae6b..81d5832fbbd5 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 			scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
 			&tl_cmd->tl_sense_buf[0]);
 
-	/*
-	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
-	 */
 	if (scsi_bidi_cmnd(sc))
-		se_cmd->t_tasks_bidi = 1;
+		se_cmd->se_cmd_flags |= SCF_BIDI;
+
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	 * Allocate the necessary tasks to complete the received CDB+data
 	 */
 	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
-	if (ret == -ENOMEM) {
-		/* Out of Resources */
-		return PYX_TRANSPORT_LU_COMM_FAILURE;
-	} else if (ret == -EINVAL) {
-		/*
-		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
-		 */
-		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
-			return PYX_TRANSPORT_RESERVATION_CONFLICT;
-		/*
-		 * Otherwise, return SAM_STAT_CHECK_CONDITION and return
-		 * sense data.
-		 */
-		return PYX_TRANSPORT_USE_SENSE_REASON;
-	}
-
+	if (ret != 0)
+		return ret;
 	/*
 	 * For BIDI commands, pass in the extra READ buffer
 	 * to transport_generic_map_mem_to_cmd() below..
 	 */
-	if (se_cmd->t_tasks_bidi) {
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
 		struct scsi_data_buffer *sdb = scsi_in(sc);
 
 		sgl_bidi = sdb->table.sgl;
@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	}
 
 	/* Tell the core about our preallocated memory */
-	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+	return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
 			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
-	if (ret < 0)
-		return PYX_TRANSPORT_LU_COMM_FAILURE;
-
-	return 0;
 }
 
 /*
@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
 {
 	struct tcm_loop_hba *tl_hba = container_of(wwn,
 				struct tcm_loop_hba, tl_hba_wwn);
-	int host_no = tl_hba->sh->host_no;
+
+	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
+		tl_hba->tl_wwn_address, tl_hba->sh->host_no);
 	/*
 	 * Call device_unregister() on the original tl_hba->dev.
 	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
 	 * release *tl_hba;
 	 */
 	device_unregister(&tl_hba->dev);
-
-	pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
-		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
-		config_item_name(&wwn->wwn_group.cg_item), host_no);
 }
 
 /* Start items for tcm_loop_cit */
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 88f2ad43ec8b..1dcbef499d6a 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
191 int alua_access_state, primary = 0, rc; 191 int alua_access_state, primary = 0, rc;
192 u16 tg_pt_id, rtpi; 192 u16 tg_pt_id, rtpi;
193 193
194 if (!l_port) 194 if (!l_port) {
195 return PYX_TRANSPORT_LU_COMM_FAILURE; 195 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
196 196 return -EINVAL;
197 }
197 buf = transport_kmap_first_data_page(cmd); 198 buf = transport_kmap_first_data_page(cmd);
198 199
199 /* 200 /*
@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
203 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; 204 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
204 if (!l_tg_pt_gp_mem) { 205 if (!l_tg_pt_gp_mem) {
205 pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); 206 pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
206 rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 207 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
208 rc = -EINVAL;
207 goto out; 209 goto out;
208 } 210 }
209 spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 211 spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
211 if (!l_tg_pt_gp) { 213 if (!l_tg_pt_gp) {
212 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 214 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
213 pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); 215 pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
214 rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 216 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
217 rc = -EINVAL;
215 goto out; 218 goto out;
216 } 219 }
217 rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA); 220 rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
220 if (!rc) { 223 if (!rc) {
221 pr_debug("Unable to process SET_TARGET_PORT_GROUPS" 224 pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
222 " while TPGS_EXPLICT_ALUA is disabled\n"); 225 " while TPGS_EXPLICT_ALUA is disabled\n");
223 rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 226 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
227 rc = -EINVAL;
224 goto out; 228 goto out;
225 } 229 }
226 230
@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
245 * REQUEST, and the additional sense code set to INVALID 249 * REQUEST, and the additional sense code set to INVALID
246 * FIELD IN PARAMETER LIST. 250 * FIELD IN PARAMETER LIST.
247 */ 251 */
248 rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 252 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
253 rc = -EINVAL;
249 goto out; 254 goto out;
250 } 255 }
251 rc = -1; 256 rc = -1;
@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
298 * throw an exception with ASCQ: INVALID_PARAMETER_LIST 303 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
299 */ 304 */
300 if (rc != 0) { 305 if (rc != 0) {
301 rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 306 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
307 rc = -EINVAL;
302 goto out; 308 goto out;
303 } 309 }
304 } else { 310 } else {
@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
335 * INVALID_PARAMETER_LIST 341 * INVALID_PARAMETER_LIST
336 */ 342 */
337 if (rc != 0) { 343 if (rc != 0) {
338 rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 344 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
345 rc = -EINVAL;
339 goto out; 346 goto out;
340 } 347 }
341 } 348 }
@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1184 * struct t10_alua_lu_gp. 1191 * struct t10_alua_lu_gp.
1185 */ 1192 */
1186 spin_lock(&lu_gps_lock); 1193 spin_lock(&lu_gps_lock);
1187 atomic_set(&lu_gp->lu_gp_shutdown, 1);
1188 list_del(&lu_gp->lu_gp_node); 1194 list_del(&lu_gp->lu_gp_node);
1189 alua_lu_gps_count--; 1195 alua_lu_gps_count--;
1190 spin_unlock(&lu_gps_lock); 1196 spin_unlock(&lu_gps_lock);
@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1438 1444
1439 tg_pt_gp_mem->tg_pt = port; 1445 tg_pt_gp_mem->tg_pt = port;
1440 port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem; 1446 port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1441 atomic_set(&port->sep_tg_pt_gp_active, 1);
1442 1447
1443 return tg_pt_gp_mem; 1448 return tg_pt_gp_mem;
1444} 1449}
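Every conversion in target_core_alua.c above follows the same shape: the handler records a TCM_* value in se_cmd->scsi_sense_reason and returns an ordinary negative errno, replacing the positive PYX_TRANSPORT_* status codes that used to carry both facts at once. A compact userspace model of that split, using illustrative stand-in names (enum sense_reason, struct cmd) rather than the kernel types:

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the TCM_* sense reasons used in the patch. */
enum sense_reason {
	SENSE_NONE = 0,
	SENSE_UNSUPPORTED_SCSI_OPCODE,
	SENSE_INVALID_PARAMETER_LIST,
	SENSE_LU_COMMUNICATION_FAILURE,
};

struct cmd {
	enum sense_reason scsi_sense_reason;
};

/*
 * Old style: one positive, subsystem-private code had to say both
 * "it failed" and "why".  New style: record why on the command and
 * return a plain negative errno.
 */
static int handle_set_target_port_groups(struct cmd *cmd, int have_port)
{
	if (!have_port) {
		cmd->scsi_sense_reason = SENSE_LU_COMMUNICATION_FAILURE;
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	struct cmd cmd = { SENSE_NONE };
	int ret = handle_set_target_port_groups(&cmd, 0);

	if (ret < 0)	/* the caller only tests the sign ... */
		printf("failed (%d), sense reason %d\n",
		       ret, cmd.scsi_sense_reason);
	return 0;
}

The caller only checks the sign of the return value; the recorded reason stays on the command for whatever later builds the sense data.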
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 683ba02b8247..831468b3163d 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
478 if (cmd->data_length < 60) 478 if (cmd->data_length < 60)
479 return 0; 479 return 0;
480 480
481 buf[2] = 0x3c; 481 buf[3] = 0x3c;
482 /* Set HEADSUP, ORDSUP, SIMPSUP */ 482 /* Set HEADSUP, ORDSUP, SIMPSUP */
483 buf[5] = 0x07; 483 buf[5] = 0x07;
484 484
@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
703 if (cmd->data_length < 4) { 703 if (cmd->data_length < 4) {
704 pr_err("SCSI Inquiry payload length: %u" 704 pr_err("SCSI Inquiry payload length: %u"
705 " too small for EVPD=1\n", cmd->data_length); 705 " too small for EVPD=1\n", cmd->data_length);
706 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
706 return -EINVAL; 707 return -EINVAL;
707 } 708 }
708 709
@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
719 } 720 }
720 721
721 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); 722 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
723 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
722 ret = -EINVAL; 724 ret = -EINVAL;
723 725
724out_unmap: 726out_unmap:
@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
969 default: 971 default:
970 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", 972 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
971 cdb[2] & 0x3f, cdb[3]); 973 cdb[2] & 0x3f, cdb[3]);
972 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; 974 cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
975 return -EINVAL;
973 } 976 }
974 offset += length; 977 offset += length;
975 978
@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
1027 if (cdb[1] & 0x01) { 1030 if (cdb[1] & 0x01) {
1028 pr_err("REQUEST_SENSE description emulation not" 1031 pr_err("REQUEST_SENSE description emulation not"
1029 " supported\n"); 1032 " supported\n");
1030 return PYX_TRANSPORT_INVALID_CDB_FIELD; 1033 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1034 return -ENOSYS;
1031 } 1035 }
1032 1036
1033 buf = transport_kmap_first_data_page(cmd); 1037 buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
1100 if (!dev->transport->do_discard) { 1104 if (!dev->transport->do_discard) {
1101 pr_err("UNMAP emulation not supported for: %s\n", 1105 pr_err("UNMAP emulation not supported for: %s\n",
1102 dev->transport->name); 1106 dev->transport->name);
1103 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1107 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1108 return -ENOSYS;
1104 } 1109 }
1105 1110
1106 /* First UNMAP block descriptor starts at 8 byte offset */ 1111 /* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
1157 if (!dev->transport->do_discard) { 1162 if (!dev->transport->do_discard) {
1158 pr_err("WRITE_SAME emulation not supported" 1163 pr_err("WRITE_SAME emulation not supported"
1159 " for: %s\n", dev->transport->name); 1164 " for: %s\n", dev->transport->name);
1160 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1165 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1166 return -ENOSYS;
1161 } 1167 }
1162 1168
1163 if (cmd->t_task_cdb[0] == WRITE_SAME) 1169 if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
1193int target_emulate_synchronize_cache(struct se_task *task) 1199int target_emulate_synchronize_cache(struct se_task *task)
1194{ 1200{
1195 struct se_device *dev = task->task_se_cmd->se_dev; 1201 struct se_device *dev = task->task_se_cmd->se_dev;
1202 struct se_cmd *cmd = task->task_se_cmd;
1196 1203
1197 if (!dev->transport->do_sync_cache) { 1204 if (!dev->transport->do_sync_cache) {
1198 pr_err("SYNCHRONIZE_CACHE emulation not supported" 1205 pr_err("SYNCHRONIZE_CACHE emulation not supported"
1199 " for: %s\n", dev->transport->name); 1206 " for: %s\n", dev->transport->name);
1200 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1207 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1208 return -ENOSYS;
1201 } 1209 }
1202 1210
1203 dev->transport->do_sync_cache(task); 1211 dev->transport->do_sync_cache(task);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e0c1e8a8dd4e..93d4f6a1b798 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
67static struct config_group alua_group; 67static struct config_group alua_group;
68static struct config_group alua_lu_gps_group; 68static struct config_group alua_lu_gps_group;
69 69
70static DEFINE_SPINLOCK(se_device_lock);
71static LIST_HEAD(se_dev_list);
72
73static inline struct se_hba * 70static inline struct se_hba *
74item_to_hba(struct config_item *item) 71item_to_hba(struct config_item *item)
75{ 72{
@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
2741 " struct se_subsystem_dev\n"); 2738 " struct se_subsystem_dev\n");
2742 goto unlock; 2739 goto unlock;
2743 } 2740 }
2744 INIT_LIST_HEAD(&se_dev->se_dev_node);
2745 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); 2741 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
2746 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); 2742 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
2747 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); 2743 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
2777 " from allocate_virtdevice()\n"); 2773 " from allocate_virtdevice()\n");
2778 goto out; 2774 goto out;
2779 } 2775 }
2780 spin_lock(&se_device_lock);
2781 list_add_tail(&se_dev->se_dev_node, &se_dev_list);
2782 spin_unlock(&se_device_lock);
2783 2776
2784 config_group_init_type_name(&se_dev->se_dev_group, name, 2777 config_group_init_type_name(&se_dev->se_dev_group, name,
2785 &target_core_dev_cit); 2778 &target_core_dev_cit);
@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
2874 mutex_lock(&hba->hba_access_mutex); 2867 mutex_lock(&hba->hba_access_mutex);
2875 t = hba->transport; 2868 t = hba->transport;
2876 2869
2877 spin_lock(&se_device_lock);
2878 list_del(&se_dev->se_dev_node);
2879 spin_unlock(&se_device_lock);
2880
2881 dev_stat_grp = &se_dev->dev_stat_grps.stat_group; 2870 dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
2882 for (i = 0; dev_stat_grp->default_groups[i]; i++) { 2871 for (i = 0; dev_stat_grp->default_groups[i]; i++) {
2883 df_item = &dev_stat_grp->default_groups[i]->cg_item; 2872 df_item = &dev_stat_grp->default_groups[i]->cg_item;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ba5edec2c5f8..9b8639425472 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
104 se_cmd->se_lun = deve->se_lun; 104 se_cmd->se_lun = deve->se_lun;
105 se_cmd->pr_res_key = deve->pr_res_key; 105 se_cmd->pr_res_key = deve->pr_res_key;
106 se_cmd->orig_fe_lun = unpacked_lun; 106 se_cmd->orig_fe_lun = unpacked_lun;
107 se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
108 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 107 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
109 } 108 }
110 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 109 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
137 se_lun = &se_sess->se_tpg->tpg_virt_lun0; 136 se_lun = &se_sess->se_tpg->tpg_virt_lun0;
138 se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; 137 se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
139 se_cmd->orig_fe_lun = 0; 138 se_cmd->orig_fe_lun = 0;
140 se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
141 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 139 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
142 } 140 }
143 /* 141 /*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
200 se_lun = deve->se_lun; 198 se_lun = deve->se_lun;
201 se_cmd->pr_res_key = deve->pr_res_key; 199 se_cmd->pr_res_key = deve->pr_res_key;
202 se_cmd->orig_fe_lun = unpacked_lun; 200 se_cmd->orig_fe_lun = unpacked_lun;
203 se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
204 } 201 }
205 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 202 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
206 203
@@ -708,7 +705,7 @@ done:
708 705
709 se_task->task_scsi_status = GOOD; 706 se_task->task_scsi_status = GOOD;
710 transport_complete_task(se_task, 1); 707 transport_complete_task(se_task, 1);
711 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 708 return 0;
712} 709}
713 710
714/* se_release_device_for_hba(): 711/* se_release_device_for_hba():
@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
957 return -EINVAL; 954 return -EINVAL;
958 } 955 }
959 956
960 pr_err("dpo_emulated not supported\n"); 957 if (flag) {
961 return -EINVAL; 958 pr_err("dpo_emulated not supported\n");
959 return -EINVAL;
960 }
961
962 return 0;
962} 963}
963 964
964int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) 965int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
968 return -EINVAL; 969 return -EINVAL;
969 } 970 }
970 971
971 if (dev->transport->fua_write_emulated == 0) { 972 if (flag && dev->transport->fua_write_emulated == 0) {
972 pr_err("fua_write_emulated not supported\n"); 973 pr_err("fua_write_emulated not supported\n");
973 return -EINVAL; 974 return -EINVAL;
974 } 975 }
@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
985 return -EINVAL; 986 return -EINVAL;
986 } 987 }
987 988
988 pr_err("ua read emulated not supported\n"); 989 if (flag) {
989 return -EINVAL; 990 pr_err("ua read emulated not supported\n");
991 return -EINVAL;
992 }
993
994 return 0;
990} 995}
991 996
992int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) 997int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
995 pr_err("Illegal value %d\n", flag); 1000 pr_err("Illegal value %d\n", flag);
996 return -EINVAL; 1001 return -EINVAL;
997 } 1002 }
998 if (dev->transport->write_cache_emulated == 0) { 1003 if (flag && dev->transport->write_cache_emulated == 0) {
999 pr_err("write_cache_emulated not supported\n"); 1004 pr_err("write_cache_emulated not supported\n");
1000 return -EINVAL; 1005 return -EINVAL;
1001 } 1006 }
@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1056 * We expect this value to be non-zero when generic Block Layer 1061 * We expect this value to be non-zero when generic Block Layer
1057 * Discard support is detected in iblock_create_virtdevice(). 1062 * Discard support is detected in iblock_create_virtdevice().
1058 */ 1063 */
1059 if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 1064 if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1060 pr_err("Generic Block Discard not supported\n"); 1065 pr_err("Generic Block Discard not supported\n");
1061 return -ENOSYS; 1066 return -ENOSYS;
1062 } 1067 }
@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1077 * We expect this value to be non-zero when generic Block Layer 1082 * We expect this value to be non-zero when generic Block Layer
1078 * Discard support is detected in iblock_create_virtdevice(). 1083 * Discard support is detected in iblock_create_virtdevice().
1079 */ 1084 */
1080 if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 1085 if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1081 pr_err("Generic Block Discard not supported\n"); 1086 pr_err("Generic Block Discard not supported\n");
1082 return -ENOSYS; 1087 return -ENOSYS;
1083 } 1088 }
@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
1587 ret = -ENOMEM; 1592 ret = -ENOMEM;
1588 goto out; 1593 goto out;
1589 } 1594 }
1590 INIT_LIST_HEAD(&se_dev->se_dev_node);
1591 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); 1595 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
1592 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); 1596 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
1593 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); 1597 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
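The se_dev_set_emulate_dpo()/fua_write()/fua_read()/write_cache()/tpu()/tpws() hunks above all add a "flag &&" guard, so writing 0 to an attribute the backend cannot emulate now succeeds and only an attempt to enable it is refused. A small sketch of that guard, with an invented backend struct standing in for se_device and its capability flags:

#include <errno.h>
#include <stdio.h>

struct backend {
	int write_cache_emulated;	/* illustrative capability flag */
};

/*
 * Reject only when the caller tries to *enable* an unsupported feature;
 * writing 0 is always allowed, matching the "flag &&" guards above.
 */
static int set_emulate_write_cache(struct backend *be, int flag)
{
	if (flag != 0 && flag != 1)
		return -EINVAL;
	if (flag && !be->write_cache_emulated) {
		fprintf(stderr, "write_cache_emulated not supported\n");
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	struct backend be = { 0 };

	printf("disable: %d\n", set_emulate_write_cache(&be, 0));	/* 0       */
	printf("enable:  %d\n", set_emulate_write_cache(&be, 1));	/* -EINVAL */
	return 0;
}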
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 67cd6fe05bfa..b4864fba4ef0 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
289 return -ENOMEM; 289 return -ENOMEM;
290 } 290 }
291 291
292 for (i = 0; i < task->task_sg_nents; i++) { 292 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
293 iov[i].iov_len = sg[i].length; 293 iov[i].iov_len = sg->length;
294 iov[i].iov_base = sg_virt(&sg[i]); 294 iov[i].iov_base = sg_virt(sg);
295 } 295 }
296 296
297 old_fs = get_fs(); 297 old_fs = get_fs();
@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
342 return -ENOMEM; 342 return -ENOMEM;
343 } 343 }
344 344
345 for (i = 0; i < task->task_sg_nents; i++) { 345 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
346 iov[i].iov_len = sg[i].length; 346 iov[i].iov_len = sg->length;
347 iov[i].iov_base = sg_virt(&sg[i]); 347 iov[i].iov_base = sg_virt(sg);
348 } 348 }
349 349
350 old_fs = get_fs(); 350 old_fs = get_fs();
@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
438 if (ret > 0 && 438 if (ret > 0 &&
439 dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && 439 dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
440 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 440 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
441 cmd->t_tasks_fua) { 441 (cmd->se_cmd_flags & SCF_FUA)) {
442 /* 442 /*
443 * We might need to be a bit smarter here 443 * We might need to be a bit smarter here
444 * and return some sense data to let the initiator 444 * and return some sense data to let the initiator
@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
449 449
450 } 450 }
451 451
452 if (ret < 0) 452 if (ret < 0) {
453 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
453 return ret; 454 return ret;
455 }
454 if (ret) { 456 if (ret) {
455 task->task_scsi_status = GOOD; 457 task->task_scsi_status = GOOD;
456 transport_complete_task(task, 1); 458 transport_complete_task(task, 1);
457 } 459 }
458 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 460 return 0;
459} 461}
460 462
461/* fd_free_task(): (Part of se_subsystem_api_t template) 463/* fd_free_task(): (Part of se_subsystem_api_t template)
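fd_do_readv() and fd_do_writev() above stop indexing the scatterlist as a flat array and walk it with for_each_sg() instead, which follows chain entries when the table spans more than one block of struct scatterlist, something sg[i] arithmetic does not do. The loop below restates that shape as a kernel-style sketch; it assumes the usual <linux/scatterlist.h> helpers and is not a standalone program:

#include <linux/scatterlist.h>
#include <linux/uio.h>

/*
 * Populate an iovec array from a (possibly chained) scatterlist.
 * for_each_sg() follows chain links; plain sg[i] indexing would walk
 * off the end of the first chunk of a chained table.
 */
static void fill_iovec_from_sgl(struct iovec *iov, struct scatterlist *sgl,
				unsigned int nents)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i) {
		iov[i].iov_len  = sg->length;
		iov[i].iov_base = sg_virt(sg);
	}
}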
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7698efe29262..4aa992204438 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
531 */ 531 */
532 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || 532 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
533 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 533 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
534 task->task_se_cmd->t_tasks_fua)) 534 (cmd->se_cmd_flags & SCF_FUA)))
535 rw = WRITE_FUA; 535 rw = WRITE_FUA;
536 else 536 else
537 rw = WRITE; 537 rw = WRITE;
@@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
554 else { 554 else {
555 pr_err("Unsupported SCSI -> BLOCK LBA conversion:" 555 pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
556 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); 556 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
557 return PYX_TRANSPORT_LU_COMM_FAILURE; 557 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
558 return -ENOSYS;
558 } 559 }
559 560
560 bio = iblock_get_bio(task, block_lba, sg_num); 561 bio = iblock_get_bio(task, block_lba, sg_num);
561 if (!bio) 562 if (!bio) {
562 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 563 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
564 return -ENOMEM;
565 }
563 566
564 bio_list_init(&list); 567 bio_list_init(&list);
565 bio_list_add(&list, bio); 568 bio_list_add(&list, bio);
@@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
588 submit_bio(rw, bio); 591 submit_bio(rw, bio);
589 blk_finish_plug(&plug); 592 blk_finish_plug(&plug);
590 593
591 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 594 return 0;
592 595
593fail: 596fail:
594 while ((bio = bio_list_pop(&list))) 597 while ((bio = bio_list_pop(&list)))
595 bio_put(bio); 598 bio_put(bio);
596 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 599 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
600 return -ENOMEM;
597} 601}
598 602
599static u32 iblock_get_device_rev(struct se_device *dev) 603static u32 iblock_get_device_rev(struct se_device *dev)
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5a4ebfc3a54f..95dee7074aeb 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
191 pr_err("Received legacy SPC-2 RESERVE/RELEASE" 191 pr_err("Received legacy SPC-2 RESERVE/RELEASE"
192 " while active SPC-3 registrations exist," 192 " while active SPC-3 registrations exist,"
193 " returning RESERVATION_CONFLICT\n"); 193 " returning RESERVATION_CONFLICT\n");
194 *ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 194 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
195 return true; 195 return true;
196 } 196 }
197 197
@@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
252 (cmd->t_task_cdb[1] & 0x02)) { 252 (cmd->t_task_cdb[1] & 0x02)) {
253 pr_err("LongIO and Obselete Bits set, returning" 253 pr_err("LongIO and Obselete Bits set, returning"
254 " ILLEGAL_REQUEST\n"); 254 " ILLEGAL_REQUEST\n");
255 ret = PYX_TRANSPORT_ILLEGAL_REQUEST; 255 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
256 ret = -EINVAL;
256 goto out; 257 goto out;
257 } 258 }
258 /* 259 /*
@@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
277 " from %s \n", cmd->se_lun->unpacked_lun, 278 " from %s \n", cmd->se_lun->unpacked_lun,
278 cmd->se_deve->mapped_lun, 279 cmd->se_deve->mapped_lun,
279 sess->se_node_acl->initiatorname); 280 sess->se_node_acl->initiatorname);
280 ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 281 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
282 ret = -EINVAL;
281 goto out_unlock; 283 goto out_unlock;
282 } 284 }
283 285
@@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
1510 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); 1512 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
1511 if (!tidh_new) { 1513 if (!tidh_new) {
1512 pr_err("Unable to allocate tidh_new\n"); 1514 pr_err("Unable to allocate tidh_new\n");
1513 return PYX_TRANSPORT_LU_COMM_FAILURE; 1515 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1516 return -EINVAL;
1514 } 1517 }
1515 INIT_LIST_HEAD(&tidh_new->dest_list); 1518 INIT_LIST_HEAD(&tidh_new->dest_list);
1516 tidh_new->dest_tpg = tpg; 1519 tidh_new->dest_tpg = tpg;
@@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
1522 sa_res_key, all_tg_pt, aptpl); 1525 sa_res_key, all_tg_pt, aptpl);
1523 if (!local_pr_reg) { 1526 if (!local_pr_reg) {
1524 kfree(tidh_new); 1527 kfree(tidh_new);
1525 return PYX_TRANSPORT_LU_COMM_FAILURE; 1528 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1529 return -ENOMEM;
1526 } 1530 }
1527 tidh_new->dest_pr_reg = local_pr_reg; 1531 tidh_new->dest_pr_reg = local_pr_reg;
1528 /* 1532 /*
@@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
1548 pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header" 1552 pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
1549 " does not equal CDB data_length: %u\n", tpdl, 1553 " does not equal CDB data_length: %u\n", tpdl,
1550 cmd->data_length); 1554 cmd->data_length);
1551 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 1555 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1556 ret = -EINVAL;
1552 goto out; 1557 goto out;
1553 } 1558 }
1554 /* 1559 /*
@@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
1598 " for tmp_tpg\n"); 1603 " for tmp_tpg\n");
1599 atomic_dec(&tmp_tpg->tpg_pr_ref_count); 1604 atomic_dec(&tmp_tpg->tpg_pr_ref_count);
1600 smp_mb__after_atomic_dec(); 1605 smp_mb__after_atomic_dec();
1601 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 1606 cmd->scsi_sense_reason =
1607 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1608 ret = -EINVAL;
1602 goto out; 1609 goto out;
1603 } 1610 }
1604 /* 1611 /*
@@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
1628 atomic_dec(&dest_node_acl->acl_pr_ref_count); 1635 atomic_dec(&dest_node_acl->acl_pr_ref_count);
1629 smp_mb__after_atomic_dec(); 1636 smp_mb__after_atomic_dec();
1630 core_scsi3_tpg_undepend_item(tmp_tpg); 1637 core_scsi3_tpg_undepend_item(tmp_tpg);
1631 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 1638 cmd->scsi_sense_reason =
1639 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1640 ret = -EINVAL;
1632 goto out; 1641 goto out;
1633 } 1642 }
1634 1643
@@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
1646 if (!dest_tpg) { 1655 if (!dest_tpg) {
1647 pr_err("SPC-3 PR SPEC_I_PT: Unable to locate" 1656 pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
1648 " dest_tpg\n"); 1657 " dest_tpg\n");
1649 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 1658 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1659 ret = -EINVAL;
1650 goto out; 1660 goto out;
1651 } 1661 }
1652#if 0 1662#if 0
@@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
1660 " %u for Transport ID: %s\n", tid_len, ptr); 1670 " %u for Transport ID: %s\n", tid_len, ptr);
1661 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1671 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1662 core_scsi3_tpg_undepend_item(dest_tpg); 1672 core_scsi3_tpg_undepend_item(dest_tpg);
1663 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 1673 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1674 ret = -EINVAL;
1664 goto out; 1675 goto out;
1665 } 1676 }
1666 /* 1677 /*
@@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
1678 1689
1679 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1690 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1680 core_scsi3_tpg_undepend_item(dest_tpg); 1691 core_scsi3_tpg_undepend_item(dest_tpg);
1681 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 1692 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1693 ret = -EINVAL;
1682 goto out; 1694 goto out;
1683 } 1695 }
1684 1696
@@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
1690 smp_mb__after_atomic_dec(); 1702 smp_mb__after_atomic_dec();
1691 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1703 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1692 core_scsi3_tpg_undepend_item(dest_tpg); 1704 core_scsi3_tpg_undepend_item(dest_tpg);
1693 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 1705 cmd->scsi_sense_reason =
1706 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1707 ret = -EINVAL;
1694 goto out; 1708 goto out;
1695 } 1709 }
1696#if 0 1710#if 0
@@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
1727 core_scsi3_lunacl_undepend_item(dest_se_deve); 1741 core_scsi3_lunacl_undepend_item(dest_se_deve);
1728 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1742 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1729 core_scsi3_tpg_undepend_item(dest_tpg); 1743 core_scsi3_tpg_undepend_item(dest_tpg);
1730 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 1744 cmd->scsi_sense_reason =
1745 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1746 ret = -ENOMEM;
1731 goto out; 1747 goto out;
1732 } 1748 }
1733 INIT_LIST_HEAD(&tidh_new->dest_list); 1749 INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
1759 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1775 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1760 core_scsi3_tpg_undepend_item(dest_tpg); 1776 core_scsi3_tpg_undepend_item(dest_tpg);
1761 kfree(tidh_new); 1777 kfree(tidh_new);
1762 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 1778 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1779 ret = -EINVAL;
1763 goto out; 1780 goto out;
1764 } 1781 }
1765 tidh_new->dest_pr_reg = dest_pr_reg; 1782 tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
2098 2115
2099 if (!se_sess || !se_lun) { 2116 if (!se_sess || !se_lun) {
2100 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2117 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2101 return PYX_TRANSPORT_LU_COMM_FAILURE; 2118 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2119 return -EINVAL;
2102 } 2120 }
2103 se_tpg = se_sess->se_tpg; 2121 se_tpg = se_sess->se_tpg;
2104 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 2122 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
2117 if (res_key) { 2135 if (res_key) {
2118 pr_warn("SPC-3 PR: Reservation Key non-zero" 2136 pr_warn("SPC-3 PR: Reservation Key non-zero"
2119 " for SA REGISTER, returning CONFLICT\n"); 2137 " for SA REGISTER, returning CONFLICT\n");
2120 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2138 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2139 return -EINVAL;
2121 } 2140 }
2122 /* 2141 /*
2123 * Do nothing but return GOOD status. 2142 * Do nothing but return GOOD status.
2124 */ 2143 */
2125 if (!sa_res_key) 2144 if (!sa_res_key)
2126 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 2145 return 0;
2127 2146
2128 if (!spec_i_pt) { 2147 if (!spec_i_pt) {
2129 /* 2148 /*
@@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
2138 if (ret != 0) { 2157 if (ret != 0) {
2139 pr_err("Unable to allocate" 2158 pr_err("Unable to allocate"
2140 " struct t10_pr_registration\n"); 2159 " struct t10_pr_registration\n");
2141 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2160 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2161 return -EINVAL;
2142 } 2162 }
2143 } else { 2163 } else {
2144 /* 2164 /*
@@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
2197 " 0x%016Lx\n", res_key, 2217 " 0x%016Lx\n", res_key,
2198 pr_reg->pr_res_key); 2218 pr_reg->pr_res_key);
2199 core_scsi3_put_pr_reg(pr_reg); 2219 core_scsi3_put_pr_reg(pr_reg);
2200 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2220 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2221 return -EINVAL;
2201 } 2222 }
2202 } 2223 }
2203 if (spec_i_pt) { 2224 if (spec_i_pt) {
2204 pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT" 2225 pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
2205 " set while sa_res_key=0\n"); 2226 " set while sa_res_key=0\n");
2206 core_scsi3_put_pr_reg(pr_reg); 2227 core_scsi3_put_pr_reg(pr_reg);
2207 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2228 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2229 return -EINVAL;
2208 } 2230 }
2209 /* 2231 /*
2210 * An existing ALL_TG_PT=1 registration being released 2232 * An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
2215 " registration exists, but ALL_TG_PT=1 bit not" 2237 " registration exists, but ALL_TG_PT=1 bit not"
2216 " present in received PROUT\n"); 2238 " present in received PROUT\n");
2217 core_scsi3_put_pr_reg(pr_reg); 2239 core_scsi3_put_pr_reg(pr_reg);
2218 return PYX_TRANSPORT_INVALID_CDB_FIELD; 2240 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2241 return -EINVAL;
2219 } 2242 }
2220 /* 2243 /*
2221 * Allocate APTPL metadata buffer used for UNREGISTER ops 2244 * Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
2227 pr_err("Unable to allocate" 2250 pr_err("Unable to allocate"
2228 " pr_aptpl_buf\n"); 2251 " pr_aptpl_buf\n");
2229 core_scsi3_put_pr_reg(pr_reg); 2252 core_scsi3_put_pr_reg(pr_reg);
2230 return PYX_TRANSPORT_LU_COMM_FAILURE; 2253 cmd->scsi_sense_reason =
2254 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2255 return -EINVAL;
2231 } 2256 }
2232 } 2257 }
2233 /* 2258 /*
@@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
2241 if (pr_holder < 0) { 2266 if (pr_holder < 0) {
2242 kfree(pr_aptpl_buf); 2267 kfree(pr_aptpl_buf);
2243 core_scsi3_put_pr_reg(pr_reg); 2268 core_scsi3_put_pr_reg(pr_reg);
2244 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2269 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2270 return -EINVAL;
2245 } 2271 }
2246 2272
2247 spin_lock(&pr_tmpl->registration_lock); 2273 spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
2405 2431
2406 if (!se_sess || !se_lun) { 2432 if (!se_sess || !se_lun) {
2407 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2433 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2408 return PYX_TRANSPORT_LU_COMM_FAILURE; 2434 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2435 return -EINVAL;
2409 } 2436 }
2410 se_tpg = se_sess->se_tpg; 2437 se_tpg = se_sess->se_tpg;
2411 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 2438 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
2417 if (!pr_reg) { 2444 if (!pr_reg) {
2418 pr_err("SPC-3 PR: Unable to locate" 2445 pr_err("SPC-3 PR: Unable to locate"
2419 " PR_REGISTERED *pr_reg for RESERVE\n"); 2446 " PR_REGISTERED *pr_reg for RESERVE\n");
2420 return PYX_TRANSPORT_LU_COMM_FAILURE; 2447 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2448 return -EINVAL;
2421 } 2449 }
2422 /* 2450 /*
2423 * From spc4r17 Section 5.7.9: Reserving: 2451 * From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
2433 " does not match existing SA REGISTER res_key:" 2461 " does not match existing SA REGISTER res_key:"
2434 " 0x%016Lx\n", res_key, pr_reg->pr_res_key); 2462 " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
2435 core_scsi3_put_pr_reg(pr_reg); 2463 core_scsi3_put_pr_reg(pr_reg);
2436 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2464 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2465 return -EINVAL;
2437 } 2466 }
2438 /* 2467 /*
2439 * From spc4r17 Section 5.7.9: Reserving: 2468 * From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
2448 if (scope != PR_SCOPE_LU_SCOPE) { 2477 if (scope != PR_SCOPE_LU_SCOPE) {
2449 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 2478 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
2450 core_scsi3_put_pr_reg(pr_reg); 2479 core_scsi3_put_pr_reg(pr_reg);
2451 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2480 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2481 return -EINVAL;
2452 } 2482 }
2453 /* 2483 /*
2454 * See if we have an existing PR reservation holder pointer at 2484 * See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
2480 2510
2481 spin_unlock(&dev->dev_reservation_lock); 2511 spin_unlock(&dev->dev_reservation_lock);
2482 core_scsi3_put_pr_reg(pr_reg); 2512 core_scsi3_put_pr_reg(pr_reg);
2483 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2513 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2514 return -EINVAL;
2484 } 2515 }
2485 /* 2516 /*
2486 * From spc4r17 Section 5.7.9: Reserving: 2517 * From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
2503 2534
2504 spin_unlock(&dev->dev_reservation_lock); 2535 spin_unlock(&dev->dev_reservation_lock);
2505 core_scsi3_put_pr_reg(pr_reg); 2536 core_scsi3_put_pr_reg(pr_reg);
2506 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2537 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2538 return -EINVAL;
2507 } 2539 }
2508 /* 2540 /*
2509 * From spc4r17 Section 5.7.9: Reserving: 2541 * From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
2517 */ 2549 */
2518 spin_unlock(&dev->dev_reservation_lock); 2550 spin_unlock(&dev->dev_reservation_lock);
2519 core_scsi3_put_pr_reg(pr_reg); 2551 core_scsi3_put_pr_reg(pr_reg);
2520 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 2552 return 0;
2521 } 2553 }
2522 /* 2554 /*
2523 * Otherwise, our *pr_reg becomes the PR reservation holder for said 2555 * Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
2574 default: 2606 default:
2575 pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:" 2607 pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
2576 " 0x%02x\n", type); 2608 " 0x%02x\n", type);
2577 return PYX_TRANSPORT_INVALID_CDB_FIELD; 2609 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2610 return -EINVAL;
2578 } 2611 }
2579 2612
2580 return ret; 2613 return ret;
@@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
2630 2663
2631 if (!se_sess || !se_lun) { 2664 if (!se_sess || !se_lun) {
2632 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2665 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2633 return PYX_TRANSPORT_LU_COMM_FAILURE; 2666 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2667 return -EINVAL;
2634 } 2668 }
2635 /* 2669 /*
2636 * Locate the existing *pr_reg via struct se_node_acl pointers 2670 * Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
2639 if (!pr_reg) { 2673 if (!pr_reg) {
2640 pr_err("SPC-3 PR: Unable to locate" 2674 pr_err("SPC-3 PR: Unable to locate"
2641 " PR_REGISTERED *pr_reg for RELEASE\n"); 2675 " PR_REGISTERED *pr_reg for RELEASE\n");
2642 return PYX_TRANSPORT_LU_COMM_FAILURE; 2676 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2677 return -EINVAL;
2643 } 2678 }
2644 /* 2679 /*
2645 * From spc4r17 Section 5.7.11.2 Releasing: 2680 * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
2661 */ 2696 */
2662 spin_unlock(&dev->dev_reservation_lock); 2697 spin_unlock(&dev->dev_reservation_lock);
2663 core_scsi3_put_pr_reg(pr_reg); 2698 core_scsi3_put_pr_reg(pr_reg);
2664 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 2699 return 0;
2665 } 2700 }
2666 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || 2701 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
2667 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) 2702 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
2675 */ 2710 */
2676 spin_unlock(&dev->dev_reservation_lock); 2711 spin_unlock(&dev->dev_reservation_lock);
2677 core_scsi3_put_pr_reg(pr_reg); 2712 core_scsi3_put_pr_reg(pr_reg);
2678 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 2713 return 0;
2679 } 2714 }
2680 /* 2715 /*
2681 * From spc4r17 Section 5.7.11.2 Releasing: 2716 * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
2697 " 0x%016Lx\n", res_key, pr_reg->pr_res_key); 2732 " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
2698 spin_unlock(&dev->dev_reservation_lock); 2733 spin_unlock(&dev->dev_reservation_lock);
2699 core_scsi3_put_pr_reg(pr_reg); 2734 core_scsi3_put_pr_reg(pr_reg);
2700 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2735 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2736 return -EINVAL;
2701 } 2737 }
2702 /* 2738 /*
2703 * From spc4r17 Section 5.7.11.2 Releasing and above: 2739 * From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
2719 2755
2720 spin_unlock(&dev->dev_reservation_lock); 2756 spin_unlock(&dev->dev_reservation_lock);
2721 core_scsi3_put_pr_reg(pr_reg); 2757 core_scsi3_put_pr_reg(pr_reg);
2722 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2758 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2759 return -EINVAL;
2723 } 2760 }
2724 /* 2761 /*
2725 * In response to a persistent reservation release request from the 2762 * In response to a persistent reservation release request from the
@@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
2802 if (!pr_reg_n) { 2839 if (!pr_reg_n) {
2803 pr_err("SPC-3 PR: Unable to locate" 2840 pr_err("SPC-3 PR: Unable to locate"
2804 " PR_REGISTERED *pr_reg for CLEAR\n"); 2841 " PR_REGISTERED *pr_reg for CLEAR\n");
2805 return PYX_TRANSPORT_LU_COMM_FAILURE; 2842 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2843 return -EINVAL;
2806 } 2844 }
2807 /* 2845 /*
2808 * From spc4r17 section 5.7.11.6, Clearing: 2846 * From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
2821 " existing SA REGISTER res_key:" 2859 " existing SA REGISTER res_key:"
2822 " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); 2860 " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
2823 core_scsi3_put_pr_reg(pr_reg_n); 2861 core_scsi3_put_pr_reg(pr_reg_n);
2824 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2862 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2863 return -EINVAL;
2825 } 2864 }
2826 /* 2865 /*
2827 * a) Release the persistent reservation, if any; 2866 * a) Release the persistent reservation, if any;
@@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
2979 int all_reg = 0, calling_it_nexus = 0, released_regs = 0; 3018 int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
2980 int prh_type = 0, prh_scope = 0, ret; 3019 int prh_type = 0, prh_scope = 0, ret;
2981 3020
2982 if (!se_sess) 3021 if (!se_sess) {
2983 return PYX_TRANSPORT_LU_COMM_FAILURE; 3022 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3023 return -EINVAL;
3024 }
2984 3025
2985 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 3026 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
2986 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 3027 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
2989 pr_err("SPC-3 PR: Unable to locate" 3030 pr_err("SPC-3 PR: Unable to locate"
2990 " PR_REGISTERED *pr_reg for PREEMPT%s\n", 3031 " PR_REGISTERED *pr_reg for PREEMPT%s\n",
2991 (abort) ? "_AND_ABORT" : ""); 3032 (abort) ? "_AND_ABORT" : "");
2992 return PYX_TRANSPORT_RESERVATION_CONFLICT; 3033 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3034 return -EINVAL;
2993 } 3035 }
2994 if (pr_reg_n->pr_res_key != res_key) { 3036 if (pr_reg_n->pr_res_key != res_key) {
2995 core_scsi3_put_pr_reg(pr_reg_n); 3037 core_scsi3_put_pr_reg(pr_reg_n);
2996 return PYX_TRANSPORT_RESERVATION_CONFLICT; 3038 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3039 return -EINVAL;
2997 } 3040 }
2998 if (scope != PR_SCOPE_LU_SCOPE) { 3041 if (scope != PR_SCOPE_LU_SCOPE) {
2999 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 3042 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
3000 core_scsi3_put_pr_reg(pr_reg_n); 3043 core_scsi3_put_pr_reg(pr_reg_n);
3001 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3044 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3045 return -EINVAL;
3002 } 3046 }
3003 INIT_LIST_HEAD(&preempt_and_abort_list); 3047 INIT_LIST_HEAD(&preempt_and_abort_list);
3004 3048
@@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
3012 if (!all_reg && !sa_res_key) { 3056 if (!all_reg && !sa_res_key) {
3013 spin_unlock(&dev->dev_reservation_lock); 3057 spin_unlock(&dev->dev_reservation_lock);
3014 core_scsi3_put_pr_reg(pr_reg_n); 3058 core_scsi3_put_pr_reg(pr_reg_n);
3015 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3059 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3060 return -EINVAL;
3016 } 3061 }
3017 /* 3062 /*
3018 * From spc4r17, section 5.7.11.4.4 Removing Registrations: 3063 * From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
3106 if (!released_regs) { 3151 if (!released_regs) {
3107 spin_unlock(&dev->dev_reservation_lock); 3152 spin_unlock(&dev->dev_reservation_lock);
3108 core_scsi3_put_pr_reg(pr_reg_n); 3153 core_scsi3_put_pr_reg(pr_reg_n);
3109 return PYX_TRANSPORT_RESERVATION_CONFLICT; 3154 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3155 return -EINVAL;
3110 } 3156 }
3111 /* 3157 /*
3112 * For an existing all registrants type reservation 3158 * For an existing all registrants type reservation
@@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
3297 default: 3343 default:
3298 pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s" 3344 pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
3299 " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type); 3345 " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
3300 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3346 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3347 return -EINVAL;
3301 } 3348 }
3302 3349
3303 return ret; 3350 return ret;
@@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3331 3378
3332 if (!se_sess || !se_lun) { 3379 if (!se_sess || !se_lun) {
3333 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 3380 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
3334 return PYX_TRANSPORT_LU_COMM_FAILURE; 3381 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3382 return -EINVAL;
3335 } 3383 }
3336 memset(dest_iport, 0, 64); 3384 memset(dest_iport, 0, 64);
3337 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 3385 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3349 if (!pr_reg) { 3397 if (!pr_reg) {
3350 pr_err("SPC-3 PR: Unable to locate PR_REGISTERED" 3398 pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
3351 " *pr_reg for REGISTER_AND_MOVE\n"); 3399 " *pr_reg for REGISTER_AND_MOVE\n");
3352 return PYX_TRANSPORT_LU_COMM_FAILURE; 3400 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3401 return -EINVAL;
3353 } 3402 }
3354 /* 3403 /*
3355 * The provided reservation key much match the existing reservation key 3404 * The provided reservation key much match the existing reservation key
@@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3360 " res_key: 0x%016Lx does not match existing SA REGISTER" 3409 " res_key: 0x%016Lx does not match existing SA REGISTER"
3361 " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); 3410 " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
3362 core_scsi3_put_pr_reg(pr_reg); 3411 core_scsi3_put_pr_reg(pr_reg);
3363 return PYX_TRANSPORT_RESERVATION_CONFLICT; 3412 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3413 return -EINVAL;
3364 } 3414 }
3365 /* 3415 /*
3366 * The service active reservation key needs to be non zero 3416 * The service active reservation key needs to be non zero
@@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3369 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero" 3419 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
3370 " sa_res_key\n"); 3420 " sa_res_key\n");
3371 core_scsi3_put_pr_reg(pr_reg); 3421 core_scsi3_put_pr_reg(pr_reg);
3372 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3422 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3423 return -EINVAL;
3373 } 3424 }
3374 3425
3375 /* 3426 /*
@@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3392 " does not equal CDB data_length: %u\n", tid_len, 3443 " does not equal CDB data_length: %u\n", tid_len,
3393 cmd->data_length); 3444 cmd->data_length);
3394 core_scsi3_put_pr_reg(pr_reg); 3445 core_scsi3_put_pr_reg(pr_reg);
3395 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3446 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3447 return -EINVAL;
3396 } 3448 }
3397 3449
3398 spin_lock(&dev->se_port_lock); 3450 spin_lock(&dev->se_port_lock);
@@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3417 atomic_dec(&dest_se_tpg->tpg_pr_ref_count); 3469 atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
3418 smp_mb__after_atomic_dec(); 3470 smp_mb__after_atomic_dec();
3419 core_scsi3_put_pr_reg(pr_reg); 3471 core_scsi3_put_pr_reg(pr_reg);
3420 return PYX_TRANSPORT_LU_COMM_FAILURE; 3472 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3473 return -EINVAL;
3421 } 3474 }
3422 3475
3423 spin_lock(&dev->se_port_lock); 3476 spin_lock(&dev->se_port_lock);
@@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3430 " fabric ops from Relative Target Port Identifier:" 3483 " fabric ops from Relative Target Port Identifier:"
3431 " %hu\n", rtpi); 3484 " %hu\n", rtpi);
3432 core_scsi3_put_pr_reg(pr_reg); 3485 core_scsi3_put_pr_reg(pr_reg);
3433 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3486 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3487 return -EINVAL;
3434 } 3488 }
3435 3489
3436 buf = transport_kmap_first_data_page(cmd); 3490 buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
3445 " from fabric: %s\n", proto_ident, 3499 " from fabric: %s\n", proto_ident,
3446 dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), 3500 dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
3447 dest_tf_ops->get_fabric_name()); 3501 dest_tf_ops->get_fabric_name());
3448 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3502 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3503 ret = -EINVAL;
3449 goto out; 3504 goto out;
3450 } 3505 }
3451 if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { 3506 if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
3452 pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not" 3507 pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
3453 " containg a valid tpg_parse_pr_out_transport_id" 3508 " containg a valid tpg_parse_pr_out_transport_id"
3454 " function pointer\n"); 3509 " function pointer\n");
3455 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 3510 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3511 ret = -EINVAL;
3456 goto out; 3512 goto out;
3457 } 3513 }
3458 initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, 3514 initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3460 if (!initiator_str) { 3516 if (!initiator_str) {
3461 pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" 3517 pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
3462 " initiator_str from Transport ID\n"); 3518 " initiator_str from Transport ID\n");
3463 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3519 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3520 ret = -EINVAL;
3464 goto out; 3521 goto out;
3465 } 3522 }
3466 3523
@@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3489 pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" 3546 pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
3490 " matches: %s on received I_T Nexus\n", initiator_str, 3547 " matches: %s on received I_T Nexus\n", initiator_str,
3491 pr_reg_nacl->initiatorname); 3548 pr_reg_nacl->initiatorname);
3492 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3549 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3550 ret = -EINVAL;
3493 goto out; 3551 goto out;
3494 } 3552 }
3495 if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) { 3553 if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3497 " matches: %s %s on received I_T Nexus\n", 3555 " matches: %s %s on received I_T Nexus\n",
3498 initiator_str, iport_ptr, pr_reg_nacl->initiatorname, 3556 initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
3499 pr_reg->pr_reg_isid); 3557 pr_reg->pr_reg_isid);
3500 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3558 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3559 ret = -EINVAL;
3501 goto out; 3560 goto out;
3502 } 3561 }
3503after_iport_check: 3562after_iport_check:
@@ -3517,7 +3576,8 @@ after_iport_check:
3517 pr_err("Unable to locate %s dest_node_acl for" 3576 pr_err("Unable to locate %s dest_node_acl for"
3518 " TransportID%s\n", dest_tf_ops->get_fabric_name(), 3577 " TransportID%s\n", dest_tf_ops->get_fabric_name(),
3519 initiator_str); 3578 initiator_str);
3520 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3579 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3580 ret = -EINVAL;
3521 goto out; 3581 goto out;
3522 } 3582 }
3523 ret = core_scsi3_nodeacl_depend_item(dest_node_acl); 3583 ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3587,8 @@ after_iport_check:
3527 atomic_dec(&dest_node_acl->acl_pr_ref_count); 3587 atomic_dec(&dest_node_acl->acl_pr_ref_count);
3528 smp_mb__after_atomic_dec(); 3588 smp_mb__after_atomic_dec();
3529 dest_node_acl = NULL; 3589 dest_node_acl = NULL;
3530 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 3590 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3591 ret = -EINVAL;
3531 goto out; 3592 goto out;
3532 } 3593 }
3533#if 0 3594#if 0
@@ -3543,7 +3604,8 @@ after_iport_check:
3543 if (!dest_se_deve) { 3604 if (!dest_se_deve) {
3544 pr_err("Unable to locate %s dest_se_deve from RTPI:" 3605 pr_err("Unable to locate %s dest_se_deve from RTPI:"
3545 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); 3606 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
3546 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3607 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3608 ret = -EINVAL;
3547 goto out; 3609 goto out;
3548 } 3610 }
3549 3611
@@ -3553,7 +3615,8 @@ after_iport_check:
3553 atomic_dec(&dest_se_deve->pr_ref_count); 3615 atomic_dec(&dest_se_deve->pr_ref_count);
3554 smp_mb__after_atomic_dec(); 3616 smp_mb__after_atomic_dec();
3555 dest_se_deve = NULL; 3617 dest_se_deve = NULL;
3556 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 3618 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3619 ret = -EINVAL;
3557 goto out; 3620 goto out;
3558 } 3621 }
3559#if 0 3622#if 0
@@ -3572,7 +3635,8 @@ after_iport_check:
3572 pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation" 3635 pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
3573 " currently held\n"); 3636 " currently held\n");
3574 spin_unlock(&dev->dev_reservation_lock); 3637 spin_unlock(&dev->dev_reservation_lock);
3575 ret = PYX_TRANSPORT_INVALID_CDB_FIELD; 3638 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3639 ret = -EINVAL;
3576 goto out; 3640 goto out;
3577 } 3641 }
3578 /* 3642 /*
@@ -3585,7 +3649,8 @@ after_iport_check:
3585 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" 3649 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
3586 " Nexus is not reservation holder\n"); 3650 " Nexus is not reservation holder\n");
3587 spin_unlock(&dev->dev_reservation_lock); 3651 spin_unlock(&dev->dev_reservation_lock);
3588 ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 3652 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3653 ret = -EINVAL;
3589 goto out; 3654 goto out;
3590 } 3655 }
3591 /* 3656 /*
@@ -3603,7 +3668,8 @@ after_iport_check:
3603 " reservation for type: %s\n", 3668 " reservation for type: %s\n",
3604 core_scsi3_pr_dump_type(pr_res_holder->pr_res_type)); 3669 core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
3605 spin_unlock(&dev->dev_reservation_lock); 3670 spin_unlock(&dev->dev_reservation_lock);
3606 ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 3671 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3672 ret = -EINVAL;
3607 goto out; 3673 goto out;
3608 } 3674 }
3609 pr_res_nacl = pr_res_holder->pr_reg_nacl; 3675 pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3706,8 @@ after_iport_check:
3640 sa_res_key, 0, aptpl, 2, 1); 3706 sa_res_key, 0, aptpl, 2, 1);
3641 if (ret != 0) { 3707 if (ret != 0) {
3642 spin_unlock(&dev->dev_reservation_lock); 3708 spin_unlock(&dev->dev_reservation_lock);
3643 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3709 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3710 ret = -EINVAL;
3644 goto out; 3711 goto out;
3645 } 3712 }
3646 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, 3713 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3771 pr_err("Received PERSISTENT_RESERVE CDB while legacy" 3838 pr_err("Received PERSISTENT_RESERVE CDB while legacy"
3772 " SPC-2 reservation is held, returning" 3839 " SPC-2 reservation is held, returning"
3773 " RESERVATION_CONFLICT\n"); 3840 " RESERVATION_CONFLICT\n");
3774 ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 3841 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
 3842 ret = -EINVAL;
3775 goto out; 3843 goto out;
3776 } 3844 }
3777 3845
@@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3779 * FIXME: A NULL struct se_session pointer means this is not coming from 3847 * FIXME: A NULL struct se_session pointer means this is not coming from
3780 * a $FABRIC_MOD's nexus, but from internal passthrough ops. 3848 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
3781 */ 3849 */
3782 if (!cmd->se_sess) 3850 if (!cmd->se_sess) {
3783 return PYX_TRANSPORT_LU_COMM_FAILURE; 3851 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3852 return -EINVAL;
3853 }
3784 3854
3785 if (cmd->data_length < 24) { 3855 if (cmd->data_length < 24) {
3786 pr_warn("SPC-PR: Received PR OUT parameter list" 3856 pr_warn("SPC-PR: Received PR OUT parameter list"
3787 " length too small: %u\n", cmd->data_length); 3857 " length too small: %u\n", cmd->data_length);
3788 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3858 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3859 ret = -EINVAL;
3789 goto out; 3860 goto out;
3790 } 3861 }
3791 /* 3862 /*
@@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3820 * SPEC_I_PT=1 is only valid for Service action: REGISTER 3891 * SPEC_I_PT=1 is only valid for Service action: REGISTER
3821 */ 3892 */
3822 if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) { 3893 if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
3823 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3894 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3895 ret = -EINVAL;
3824 goto out; 3896 goto out;
3825 } 3897 }
3826 3898
@@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3837 (cmd->data_length != 24)) { 3909 (cmd->data_length != 24)) {
3838 pr_warn("SPC-PR: Received PR OUT illegal parameter" 3910 pr_warn("SPC-PR: Received PR OUT illegal parameter"
3839 " list length: %u\n", cmd->data_length); 3911 " list length: %u\n", cmd->data_length);
3840 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3912 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3913 ret = -EINVAL;
3841 goto out; 3914 goto out;
3842 } 3915 }
3843 /* 3916 /*
@@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3878 default: 3951 default:
3879 pr_err("Unknown PERSISTENT_RESERVE_OUT service" 3952 pr_err("Unknown PERSISTENT_RESERVE_OUT service"
3880 " action: 0x%02x\n", cdb[1] & 0x1f); 3953 " action: 0x%02x\n", cdb[1] & 0x1f);
3881 ret = PYX_TRANSPORT_INVALID_CDB_FIELD; 3954 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3955 ret = -EINVAL;
3882 break; 3956 break;
3883 } 3957 }
3884 3958
@@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
3906 if (cmd->data_length < 8) { 3980 if (cmd->data_length < 8) {
3907 pr_err("PRIN SA READ_KEYS SCSI Data Length: %u" 3981 pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
3908 " too small\n", cmd->data_length); 3982 " too small\n", cmd->data_length);
3909 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3983 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3984 return -EINVAL;
3910 } 3985 }
3911 3986
3912 buf = transport_kmap_first_data_page(cmd); 3987 buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
3965 if (cmd->data_length < 8) { 4040 if (cmd->data_length < 8) {
3966 pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u" 4041 pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
3967 " too small\n", cmd->data_length); 4042 " too small\n", cmd->data_length);
3968 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4043 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4044 return -EINVAL;
3969 } 4045 }
3970 4046
3971 buf = transport_kmap_first_data_page(cmd); 4047 buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
4047 if (cmd->data_length < 6) { 4123 if (cmd->data_length < 6) {
4048 pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:" 4124 pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
4049 " %u too small\n", cmd->data_length); 4125 " %u too small\n", cmd->data_length);
4050 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4126 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4127 return -EINVAL;
4051 } 4128 }
4052 4129
4053 buf = transport_kmap_first_data_page(cmd); 4130 buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4108 if (cmd->data_length < 8) { 4185 if (cmd->data_length < 8) {
4109 pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" 4186 pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
4110 " too small\n", cmd->data_length); 4187 " too small\n", cmd->data_length);
4111 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4188 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4189 return -EINVAL;
4112 } 4190 }
4113 4191
4114 buf = transport_kmap_first_data_page(cmd); 4192 buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
4255 pr_err("Received PERSISTENT_RESERVE CDB while legacy" 4333 pr_err("Received PERSISTENT_RESERVE CDB while legacy"
4256 " SPC-2 reservation is held, returning" 4334 " SPC-2 reservation is held, returning"
4257 " RESERVATION_CONFLICT\n"); 4335 " RESERVATION_CONFLICT\n");
4258 return PYX_TRANSPORT_RESERVATION_CONFLICT; 4336 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
4337 return -EINVAL;
4259 } 4338 }
4260 4339
4261 switch (cmd->t_task_cdb[1] & 0x1f) { 4340 switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
4274 default: 4353 default:
4275 pr_err("Unknown PERSISTENT_RESERVE_IN service" 4354 pr_err("Unknown PERSISTENT_RESERVE_IN service"
4276 " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f); 4355 " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
4277 ret = PYX_TRANSPORT_INVALID_CDB_FIELD; 4356 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4357 ret = -EINVAL;
4278 break; 4358 break;
4279 } 4359 }
4280 4360
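
The hunks above all make the same conversion: instead of returning an opaque PYX_TRANSPORT_* value, each failure path now records a TCM_* sense reason on the command and returns a plain negative errno. A minimal sketch of the resulting convention, assuming only the target_core header already used by this code (the function name is illustrative, not part of the driver):

    #include <target/target_core_base.h>

    static int example_pr_parameter_check(struct se_cmd *cmd)
    {
            if (cmd->data_length < 24) {
                    /* describe the failure for the eventual CHECK CONDITION */
                    cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
                    /* a plain errno replaces the old PYX_TRANSPORT_* code */
                    return -EINVAL;
            }
            return 0;
    }

The caller no longer translates a private status code; the sense reason travels with the command until the response is queued.
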
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ed32e1efe429..8b15e56b0384 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
963static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, 963static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
964 struct bio **hbio) 964 struct bio **hbio)
965{ 965{
966 struct se_cmd *cmd = task->task_se_cmd;
966 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; 967 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
967 u32 task_sg_num = task->task_sg_nents; 968 u32 task_sg_num = task->task_sg_nents;
968 struct bio *bio = NULL, *tbio = NULL; 969 struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
971 u32 data_len = task->task_size, i, len, bytes, off; 972 u32 data_len = task->task_size, i, len, bytes, off;
972 int nr_pages = (task->task_size + task_sg[0].offset + 973 int nr_pages = (task->task_size + task_sg[0].offset +
973 PAGE_SIZE - 1) >> PAGE_SHIFT; 974 PAGE_SIZE - 1) >> PAGE_SHIFT;
974 int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 975 int nr_vecs = 0, rc;
975 int rw = (task->task_data_direction == DMA_TO_DEVICE); 976 int rw = (task->task_data_direction == DMA_TO_DEVICE);
976 977
977 *hbio = NULL; 978 *hbio = NULL;
@@ -1058,11 +1059,13 @@ fail:
1058 bio->bi_next = NULL; 1059 bio->bi_next = NULL;
1059 bio_endio(bio, 0); /* XXX: should be error */ 1060 bio_endio(bio, 0); /* XXX: should be error */
1060 } 1061 }
1061 return ret; 1062 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1063 return -ENOMEM;
1062} 1064}
1063 1065
1064static int pscsi_do_task(struct se_task *task) 1066static int pscsi_do_task(struct se_task *task)
1065{ 1067{
1068 struct se_cmd *cmd = task->task_se_cmd;
1066 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; 1069 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
1067 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 1070 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1068 struct request *req; 1071 struct request *req;
@@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
1078 if (!req || IS_ERR(req)) { 1081 if (!req || IS_ERR(req)) {
1079 pr_err("PSCSI: blk_get_request() failed: %ld\n", 1082 pr_err("PSCSI: blk_get_request() failed: %ld\n",
1080 req ? IS_ERR(req) : -ENOMEM); 1083 req ? IS_ERR(req) : -ENOMEM);
1081 return PYX_TRANSPORT_LU_COMM_FAILURE; 1084 cmd->scsi_sense_reason =
1085 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1086 return -ENODEV;
1082 } 1087 }
1083 } else { 1088 } else {
1084 BUG_ON(!task->task_size); 1089 BUG_ON(!task->task_size);
@@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
1087 * Setup the main struct request for the task->task_sg[] payload 1092 * Setup the main struct request for the task->task_sg[] payload
1088 */ 1093 */
1089 ret = pscsi_map_sg(task, task->task_sg, &hbio); 1094 ret = pscsi_map_sg(task, task->task_sg, &hbio);
1090 if (ret < 0) 1095 if (ret < 0) {
1091 return PYX_TRANSPORT_LU_COMM_FAILURE; 1096 cmd->scsi_sense_reason =
1097 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1098 return ret;
1099 }
1092 1100
1093 req = blk_make_request(pdv->pdv_sd->request_queue, hbio, 1101 req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
1094 GFP_KERNEL); 1102 GFP_KERNEL);
@@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
1115 (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), 1123 (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
1116 pscsi_req_done); 1124 pscsi_req_done);
1117 1125
1118 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 1126 return 0;
1119 1127
1120fail: 1128fail:
1121 while (hbio) { 1129 while (hbio) {
@@ -1124,7 +1132,8 @@ fail:
1124 bio->bi_next = NULL; 1132 bio->bi_next = NULL;
1125 bio_endio(bio, 0); /* XXX: should be error */ 1133 bio_endio(bio, 0); /* XXX: should be error */
1126 } 1134 }
1127 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 1135 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1136 return -ENOMEM;
1128} 1137}
1129 1138
1130/* pscsi_get_sense_buffer(): 1139/* pscsi_get_sense_buffer():
@@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
1198 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], 1207 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
1199 pt->pscsi_result); 1208 pt->pscsi_result);
1200 task->task_scsi_status = SAM_STAT_CHECK_CONDITION; 1209 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
1201 task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1210 task->task_se_cmd->scsi_sense_reason =
1202 task->task_se_cmd->transport_error_status = 1211 TCM_UNSUPPORTED_SCSI_OPCODE;
1203 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1204 transport_complete_task(task, 0); 1212 transport_complete_task(task, 0);
1205 break; 1213 break;
1206 } 1214 }
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 5158d3846f19..02e51faa2f4e 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
343 return NULL; 343 return NULL;
344} 344}
345 345
346/* rd_MEMCPY_read(): 346static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
347 *
348 *
349 */
350static int rd_MEMCPY_read(struct rd_request *req)
351{ 347{
352 struct se_task *task = &req->rd_task; 348 struct se_task *task = &req->rd_task;
353 struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; 349 struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
354 struct rd_dev_sg_table *table; 350 struct rd_dev_sg_table *table;
355 struct scatterlist *sg_d, *sg_s; 351 struct scatterlist *rd_sg;
356 void *dst, *src; 352 struct sg_mapping_iter m;
357 u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
358 u32 length, page_end = 0, table_sg_end;
359 u32 rd_offset = req->rd_offset; 353 u32 rd_offset = req->rd_offset;
354 u32 src_len;
360 355
361 table = rd_get_sg_table(dev, req->rd_page); 356 table = rd_get_sg_table(dev, req->rd_page);
362 if (!table) 357 if (!table)
363 return -EINVAL; 358 return -EINVAL;
364 359
365 table_sg_end = (table->page_end_offset - req->rd_page); 360 rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
366 sg_d = task->task_sg;
367 sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
368 361
369 pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" 362 pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
370 " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, 363 dev->rd_dev_id, read_rd ? "Read" : "Write",
371 req->rd_page, req->rd_offset); 364 task->task_lba, req->rd_size, req->rd_page,
372 365 rd_offset);
373 src_offset = rd_offset;
374 366
367 src_len = PAGE_SIZE - rd_offset;
368 sg_miter_start(&m, task->task_sg, task->task_sg_nents,
369 read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
375 while (req->rd_size) { 370 while (req->rd_size) {
376 if ((sg_d[i].length - dst_offset) < 371 u32 len;
377 (sg_s[j].length - src_offset)) { 372 void *rd_addr;
378 length = (sg_d[i].length - dst_offset);
379
380 pr_debug("Step 1 - sg_d[%d]: %p length: %d"
381 " offset: %u sg_s[%d].length: %u\n", i,
382 &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
383 sg_s[j].length);
384 pr_debug("Step 1 - length: %u dst_offset: %u"
385 " src_offset: %u\n", length, dst_offset,
386 src_offset);
387
388 if (length > req->rd_size)
389 length = req->rd_size;
390
391 dst = sg_virt(&sg_d[i++]) + dst_offset;
392 BUG_ON(!dst);
393
394 src = sg_virt(&sg_s[j]) + src_offset;
395 BUG_ON(!src);
396
397 dst_offset = 0;
398 src_offset = length;
399 page_end = 0;
400 } else {
401 length = (sg_s[j].length - src_offset);
402
403 pr_debug("Step 2 - sg_d[%d]: %p length: %d"
404 " offset: %u sg_s[%d].length: %u\n", i,
405 &sg_d[i], sg_d[i].length, sg_d[i].offset,
406 j, sg_s[j].length);
407 pr_debug("Step 2 - length: %u dst_offset: %u"
408 " src_offset: %u\n", length, dst_offset,
409 src_offset);
410
411 if (length > req->rd_size)
412 length = req->rd_size;
413
414 dst = sg_virt(&sg_d[i]) + dst_offset;
415 BUG_ON(!dst);
416
417 if (sg_d[i].length == length) {
418 i++;
419 dst_offset = 0;
420 } else
421 dst_offset = length;
422
423 src = sg_virt(&sg_s[j++]) + src_offset;
424 BUG_ON(!src);
425
426 src_offset = 0;
427 page_end = 1;
428 }
429 373
430 memcpy(dst, src, length); 374 sg_miter_next(&m);
375 len = min((u32)m.length, src_len);
376 m.consumed = len;
431 377
432 pr_debug("page: %u, remaining size: %u, length: %u," 378 rd_addr = sg_virt(rd_sg) + rd_offset;
433 " i: %u, j: %u\n", req->rd_page,
434 (req->rd_size - length), length, i, j);
435 379
436 req->rd_size -= length; 380 if (read_rd)
437 if (!req->rd_size) 381 memcpy(m.addr, rd_addr, len);
438 return 0; 382 else
383 memcpy(rd_addr, m.addr, len);
439 384
440 if (!page_end) 385 req->rd_size -= len;
386 if (!req->rd_size)
441 continue; 387 continue;
442 388
443 if (++req->rd_page <= table->page_end_offset) { 389 src_len -= len;
444 pr_debug("page: %u in same page table\n", 390 if (src_len) {
445 req->rd_page); 391 rd_offset += len;
446 continue; 392 continue;
447 } 393 }
448 394
449 pr_debug("getting new page table for page: %u\n", 395 /* rd page completed, next one please */
450 req->rd_page); 396 req->rd_page++;
451 397 rd_offset = 0;
452 table = rd_get_sg_table(dev, req->rd_page); 398 src_len = PAGE_SIZE;
453 if (!table) 399 if (req->rd_page <= table->page_end_offset) {
454 return -EINVAL; 400 rd_sg++;
455
456 sg_s = &table->sg_table[j = 0];
457 }
458
459 return 0;
460}
461
462/* rd_MEMCPY_write():
463 *
464 *
465 */
466static int rd_MEMCPY_write(struct rd_request *req)
467{
468 struct se_task *task = &req->rd_task;
469 struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
470 struct rd_dev_sg_table *table;
471 struct scatterlist *sg_d, *sg_s;
472 void *dst, *src;
473 u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
474 u32 length, page_end = 0, table_sg_end;
475 u32 rd_offset = req->rd_offset;
476
477 table = rd_get_sg_table(dev, req->rd_page);
478 if (!table)
479 return -EINVAL;
480
481 table_sg_end = (table->page_end_offset - req->rd_page);
482 sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
483 sg_s = task->task_sg;
484
485 pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
486 " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
487 req->rd_page, req->rd_offset);
488
489 dst_offset = rd_offset;
490
491 while (req->rd_size) {
492 if ((sg_s[i].length - src_offset) <
493 (sg_d[j].length - dst_offset)) {
494 length = (sg_s[i].length - src_offset);
495
496 pr_debug("Step 1 - sg_s[%d]: %p length: %d"
497 " offset: %d sg_d[%d].length: %u\n", i,
498 &sg_s[i], sg_s[i].length, sg_s[i].offset,
499 j, sg_d[j].length);
500 pr_debug("Step 1 - length: %u src_offset: %u"
501 " dst_offset: %u\n", length, src_offset,
502 dst_offset);
503
504 if (length > req->rd_size)
505 length = req->rd_size;
506
507 src = sg_virt(&sg_s[i++]) + src_offset;
508 BUG_ON(!src);
509
510 dst = sg_virt(&sg_d[j]) + dst_offset;
511 BUG_ON(!dst);
512
513 src_offset = 0;
514 dst_offset = length;
515 page_end = 0;
516 } else {
517 length = (sg_d[j].length - dst_offset);
518
519 pr_debug("Step 2 - sg_s[%d]: %p length: %d"
520 " offset: %d sg_d[%d].length: %u\n", i,
521 &sg_s[i], sg_s[i].length, sg_s[i].offset,
522 j, sg_d[j].length);
523 pr_debug("Step 2 - length: %u src_offset: %u"
524 " dst_offset: %u\n", length, src_offset,
525 dst_offset);
526
527 if (length > req->rd_size)
528 length = req->rd_size;
529
530 src = sg_virt(&sg_s[i]) + src_offset;
531 BUG_ON(!src);
532
533 if (sg_s[i].length == length) {
534 i++;
535 src_offset = 0;
536 } else
537 src_offset = length;
538
539 dst = sg_virt(&sg_d[j++]) + dst_offset;
540 BUG_ON(!dst);
541
542 dst_offset = 0;
543 page_end = 1;
544 }
545
546 memcpy(dst, src, length);
547
548 pr_debug("page: %u, remaining size: %u, length: %u,"
549 " i: %u, j: %u\n", req->rd_page,
550 (req->rd_size - length), length, i, j);
551
552 req->rd_size -= length;
553 if (!req->rd_size)
554 return 0;
555
556 if (!page_end)
557 continue;
558
559 if (++req->rd_page <= table->page_end_offset) {
560 pr_debug("page: %u in same page table\n",
561 req->rd_page);
562 continue; 401 continue;
563 } 402 }
564 403
565 pr_debug("getting new page table for page: %u\n",
566 req->rd_page);
567
568 table = rd_get_sg_table(dev, req->rd_page); 404 table = rd_get_sg_table(dev, req->rd_page);
569 if (!table) 405 if (!table) {
406 sg_miter_stop(&m);
570 return -EINVAL; 407 return -EINVAL;
408 }
571 409
572 sg_d = &table->sg_table[j = 0]; 410 /* since we increment, the first sg entry is correct */
411 rd_sg = table->sg_table;
573 } 412 }
574 413 sg_miter_stop(&m);
575 return 0; 414 return 0;
576} 415}
577 416
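
The rewritten rd_MEMCPY() above replaces the hand-rolled dual scatterlist walk with the generic sg_mapping_iter helpers from lib/scatterlist.c. A minimal sketch of that iterator pattern outside the driver, assuming a flat source buffer (copy_buf_to_sgl() is a hypothetical helper, not part of this patch):

    #include <linux/kernel.h>
    #include <linux/scatterlist.h>
    #include <linux/string.h>

    static void copy_buf_to_sgl(const void *src, size_t len,
                                struct scatterlist *sgl, unsigned int nents)
    {
            struct sg_mapping_iter miter;

            /* SG_MITER_TO_SG: data flows into the scatterlist pages */
            sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
            while (len && sg_miter_next(&miter)) {
                    size_t n = min(len, miter.length);

                    memcpy(miter.addr, src, n); /* miter.addr is mapped for us */
                    miter.consumed = n;         /* tell the iterator how much was used */
                    src += n;
                    len -= n;
            }
            sg_miter_stop(&miter);              /* unmap and flush the last page */
    }

rd_MEMCPY() follows the same shape, but keeps its own cursor (rd_sg, rd_offset, src_len) into the ramdisk's backing tables on the other side of the copy.
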
@@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
583{ 422{
584 struct se_device *dev = task->task_se_cmd->se_dev; 423 struct se_device *dev = task->task_se_cmd->se_dev;
585 struct rd_request *req = RD_REQ(task); 424 struct rd_request *req = RD_REQ(task);
586 unsigned long long lba; 425 u64 tmp;
587 int ret; 426 int ret;
588 427
589 req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE; 428 tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
590 lba = task->task_lba; 429 req->rd_offset = do_div(tmp, PAGE_SIZE);
591 req->rd_offset = (do_div(lba, 430 req->rd_page = tmp;
592 (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
593 dev->se_sub_dev->se_dev_attrib.block_size;
594 req->rd_size = task->task_size; 431 req->rd_size = task->task_size;
595 432
596 if (task->task_data_direction == DMA_FROM_DEVICE) 433 ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
597 ret = rd_MEMCPY_read(req);
598 else
599 ret = rd_MEMCPY_write(req);
600
601 if (ret != 0) 434 if (ret != 0)
602 return ret; 435 return ret;
603 436
604 task->task_scsi_status = GOOD; 437 task->task_scsi_status = GOOD;
605 transport_complete_task(task, 1); 438 transport_complete_task(task, 1);
606 439 return 0;
607 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
608} 440}
609 441
610/* rd_free_task(): (Part of se_subsystem_api_t template) 442/* rd_free_task(): (Part of se_subsystem_api_t template)
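
The new offset math in rd_MEMCPY_do_task() folds the old two-step division into a single do_div(): the task's starting byte (LBA times block size) is split into a backing-page index and an offset within that page. A worked example, assuming a 512-byte block size and 4 KiB PAGE_SIZE (values not fixed by the hunk itself):

    /*
     * task_lba = 9, block_size = 512:
     *
     *     tmp = 9 * 512 = 4608 bytes
     *     do_div(tmp, PAGE_SIZE)  ->  tmp = 1 (quotient), returns 512 (remainder)
     *
     * so req->rd_page = 1 and req->rd_offset = 512: the copy starts 512 bytes
     * into the second backing page, exactly where byte 4608 lives.
     */
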
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 217e29df6297..684522805a1f 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
345 " %d t_fe_count: %d\n", (preempt_and_abort_list) ? 345 " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
346 "Preempt" : "", cmd, cmd->t_state, 346 "Preempt" : "", cmd, cmd->t_state,
347 atomic_read(&cmd->t_fe_count)); 347 atomic_read(&cmd->t_fe_count));
348 /*
349 * Signal that the command has failed via cmd->se_cmd_flags,
350 */
351 transport_new_cmd_failure(cmd);
352 348
353 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, 349 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
354 atomic_read(&cmd->t_fe_count)); 350 atomic_read(&cmd->t_fe_count));
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3400ae6e93f8..0257658e2e3e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -61,7 +61,6 @@
61static int sub_api_initialized; 61static int sub_api_initialized;
62 62
63static struct workqueue_struct *target_completion_wq; 63static struct workqueue_struct *target_completion_wq;
64static struct kmem_cache *se_cmd_cache;
65static struct kmem_cache *se_sess_cache; 64static struct kmem_cache *se_sess_cache;
66struct kmem_cache *se_tmr_req_cache; 65struct kmem_cache *se_tmr_req_cache;
67struct kmem_cache *se_ua_cache; 66struct kmem_cache *se_ua_cache;
@@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
82static void transport_put_cmd(struct se_cmd *cmd); 81static void transport_put_cmd(struct se_cmd *cmd);
83static void transport_remove_cmd_from_queue(struct se_cmd *cmd); 82static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
84static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); 83static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
85static void transport_generic_request_failure(struct se_cmd *, int, int); 84static void transport_generic_request_failure(struct se_cmd *);
86static void target_complete_ok_work(struct work_struct *work); 85static void target_complete_ok_work(struct work_struct *work);
87 86
88int init_se_kmem_caches(void) 87int init_se_kmem_caches(void)
89{ 88{
90 se_cmd_cache = kmem_cache_create("se_cmd_cache",
91 sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
92 if (!se_cmd_cache) {
93 pr_err("kmem_cache_create for struct se_cmd failed\n");
94 goto out;
95 }
96 se_tmr_req_cache = kmem_cache_create("se_tmr_cache", 89 se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
97 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), 90 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
98 0, NULL); 91 0, NULL);
99 if (!se_tmr_req_cache) { 92 if (!se_tmr_req_cache) {
100 pr_err("kmem_cache_create() for struct se_tmr_req" 93 pr_err("kmem_cache_create() for struct se_tmr_req"
101 " failed\n"); 94 " failed\n");
102 goto out_free_cmd_cache; 95 goto out;
103 } 96 }
104 se_sess_cache = kmem_cache_create("se_sess_cache", 97 se_sess_cache = kmem_cache_create("se_sess_cache",
105 sizeof(struct se_session), __alignof__(struct se_session), 98 sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +175,6 @@ out_free_sess_cache:
182 kmem_cache_destroy(se_sess_cache); 175 kmem_cache_destroy(se_sess_cache);
183out_free_tmr_req_cache: 176out_free_tmr_req_cache:
184 kmem_cache_destroy(se_tmr_req_cache); 177 kmem_cache_destroy(se_tmr_req_cache);
185out_free_cmd_cache:
186 kmem_cache_destroy(se_cmd_cache);
187out: 178out:
188 return -ENOMEM; 179 return -ENOMEM;
189} 180}
@@ -191,7 +182,6 @@ out:
191void release_se_kmem_caches(void) 182void release_se_kmem_caches(void)
192{ 183{
193 destroy_workqueue(target_completion_wq); 184 destroy_workqueue(target_completion_wq);
194 kmem_cache_destroy(se_cmd_cache);
195 kmem_cache_destroy(se_tmr_req_cache); 185 kmem_cache_destroy(se_tmr_req_cache);
196 kmem_cache_destroy(se_sess_cache); 186 kmem_cache_destroy(se_sess_cache);
197 kmem_cache_destroy(se_ua_cache); 187 kmem_cache_destroy(se_ua_cache);
@@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
680 task->task_scsi_status = GOOD; 670 task->task_scsi_status = GOOD;
681 } else { 671 } else {
682 task->task_scsi_status = SAM_STAT_CHECK_CONDITION; 672 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
683 task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; 673 task->task_se_cmd->scsi_sense_reason =
684 task->task_se_cmd->transport_error_status = 674 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
685 PYX_TRANSPORT_ILLEGAL_REQUEST; 675
686 } 676 }
687 677
688 transport_complete_task(task, good); 678 transport_complete_task(task, good);
@@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
693{ 683{
694 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 684 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
695 685
696 transport_generic_request_failure(cmd, 1, 1); 686 transport_generic_request_failure(cmd);
697} 687}
698 688
699/* transport_complete_task(): 689/* transport_complete_task():
@@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
755 if (cmd->t_tasks_failed) { 745 if (cmd->t_tasks_failed) {
756 if (!task->task_error_status) { 746 if (!task->task_error_status) {
757 task->task_error_status = 747 task->task_error_status =
758 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 748 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
759 cmd->transport_error_status = 749 cmd->scsi_sense_reason =
760 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 750 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
761 } 751 }
752
762 INIT_WORK(&cmd->work, target_complete_failure_work); 753 INIT_WORK(&cmd->work, target_complete_failure_work);
763 } else { 754 } else {
764 atomic_set(&cmd->t_transport_complete, 1); 755 atomic_set(&cmd->t_transport_complete, 1);
@@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
1335 dev->se_hba = hba; 1326 dev->se_hba = hba;
1336 dev->se_sub_dev = se_dev; 1327 dev->se_sub_dev = se_dev;
1337 dev->transport = transport; 1328 dev->transport = transport;
1338 atomic_set(&dev->active_cmds, 0);
1339 INIT_LIST_HEAD(&dev->dev_list); 1329 INIT_LIST_HEAD(&dev->dev_list);
1340 INIT_LIST_HEAD(&dev->dev_sep_list); 1330 INIT_LIST_HEAD(&dev->dev_sep_list);
1341 INIT_LIST_HEAD(&dev->dev_tmr_list); 1331 INIT_LIST_HEAD(&dev->dev_tmr_list);
1342 INIT_LIST_HEAD(&dev->execute_task_list); 1332 INIT_LIST_HEAD(&dev->execute_task_list);
1343 INIT_LIST_HEAD(&dev->delayed_cmd_list); 1333 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1344 INIT_LIST_HEAD(&dev->ordered_cmd_list);
1345 INIT_LIST_HEAD(&dev->state_task_list); 1334 INIT_LIST_HEAD(&dev->state_task_list);
1346 INIT_LIST_HEAD(&dev->qf_cmd_list); 1335 INIT_LIST_HEAD(&dev->qf_cmd_list);
1347 spin_lock_init(&dev->execute_task_lock); 1336 spin_lock_init(&dev->execute_task_lock);
1348 spin_lock_init(&dev->delayed_cmd_lock); 1337 spin_lock_init(&dev->delayed_cmd_lock);
1349 spin_lock_init(&dev->ordered_cmd_lock);
1350 spin_lock_init(&dev->state_task_lock);
1351 spin_lock_init(&dev->dev_alua_lock);
1352 spin_lock_init(&dev->dev_reservation_lock); 1338 spin_lock_init(&dev->dev_reservation_lock);
1353 spin_lock_init(&dev->dev_status_lock); 1339 spin_lock_init(&dev->dev_status_lock);
1354 spin_lock_init(&dev->dev_status_thr_lock);
1355 spin_lock_init(&dev->se_port_lock); 1340 spin_lock_init(&dev->se_port_lock);
1356 spin_lock_init(&dev->se_tmr_lock); 1341 spin_lock_init(&dev->se_tmr_lock);
1357 spin_lock_init(&dev->qf_cmd_lock); 1342 spin_lock_init(&dev->qf_cmd_lock);
@@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
1507{ 1492{
1508 INIT_LIST_HEAD(&cmd->se_lun_node); 1493 INIT_LIST_HEAD(&cmd->se_lun_node);
1509 INIT_LIST_HEAD(&cmd->se_delayed_node); 1494 INIT_LIST_HEAD(&cmd->se_delayed_node);
1510 INIT_LIST_HEAD(&cmd->se_ordered_node);
1511 INIT_LIST_HEAD(&cmd->se_qf_node); 1495 INIT_LIST_HEAD(&cmd->se_qf_node);
1512 INIT_LIST_HEAD(&cmd->se_queue_node); 1496 INIT_LIST_HEAD(&cmd->se_queue_node);
1513 INIT_LIST_HEAD(&cmd->se_cmd_list); 1497 INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
1573 pr_err("Received SCSI CDB with command_size: %d that" 1557 pr_err("Received SCSI CDB with command_size: %d that"
1574 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1558 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1575 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1559 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1560 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1561 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1576 return -EINVAL; 1562 return -EINVAL;
1577 } 1563 }
1578 /* 1564 /*
@@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
1588 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1574 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1589 scsi_command_size(cdb), 1575 scsi_command_size(cdb),
1590 (unsigned long)sizeof(cmd->__t_task_cdb)); 1576 (unsigned long)sizeof(cmd->__t_task_cdb));
1577 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1578 cmd->scsi_sense_reason =
1579 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1591 return -ENOMEM; 1580 return -ENOMEM;
1592 } 1581 }
1593 } else 1582 } else
@@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
1658 * and call transport_generic_request_failure() if necessary.. 1647 * and call transport_generic_request_failure() if necessary..
1659 */ 1648 */
1660 ret = transport_generic_new_cmd(cmd); 1649 ret = transport_generic_new_cmd(cmd);
1661 if (ret < 0) { 1650 if (ret < 0)
1662 cmd->transport_error_status = ret; 1651 transport_generic_request_failure(cmd);
1663 transport_generic_request_failure(cmd, 0, 1652
1664 (cmd->data_direction != DMA_TO_DEVICE));
1665 }
1666 return 0; 1653 return 0;
1667} 1654}
1668EXPORT_SYMBOL(transport_handle_cdb_direct); 1655EXPORT_SYMBOL(transport_handle_cdb_direct);
@@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1798/* 1785/*
1799 * Handle SAM-esque emulation for generic transport request failures. 1786 * Handle SAM-esque emulation for generic transport request failures.
1800 */ 1787 */
1801static void transport_generic_request_failure( 1788static void transport_generic_request_failure(struct se_cmd *cmd)
1802 struct se_cmd *cmd,
1803 int complete,
1804 int sc)
1805{ 1789{
1806 int ret = 0; 1790 int ret = 0;
1807 1791
1808 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 1792 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1809 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 1793 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1810 cmd->t_task_cdb[0]); 1794 cmd->t_task_cdb[0]);
1811 pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n", 1795 pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
1812 cmd->se_tfo->get_cmd_state(cmd), 1796 cmd->se_tfo->get_cmd_state(cmd),
1813 cmd->t_state, 1797 cmd->t_state, cmd->scsi_sense_reason);
1814 cmd->transport_error_status);
1815 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" 1798 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
1816 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" 1799 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1817 " t_transport_active: %d t_transport_stop: %d" 1800 " t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
1829 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 1812 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1830 transport_complete_task_attr(cmd); 1813 transport_complete_task_attr(cmd);
1831 1814
1832 if (complete) { 1815 switch (cmd->scsi_sense_reason) {
1833 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; 1816 case TCM_NON_EXISTENT_LUN:
1834 } 1817 case TCM_UNSUPPORTED_SCSI_OPCODE:
1835 1818 case TCM_INVALID_CDB_FIELD:
1836 switch (cmd->transport_error_status) { 1819 case TCM_INVALID_PARAMETER_LIST:
1837 case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: 1820 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1838 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1821 case TCM_UNKNOWN_MODE_PAGE:
1839 break; 1822 case TCM_WRITE_PROTECTED:
1840 case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: 1823 case TCM_CHECK_CONDITION_ABORT_CMD:
1841 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; 1824 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1842 break; 1825 case TCM_CHECK_CONDITION_NOT_READY:
1843 case PYX_TRANSPORT_INVALID_CDB_FIELD:
1844 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1845 break;
1846 case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
1847 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1848 break;
1849 case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
1850 if (!sc)
1851 transport_new_cmd_failure(cmd);
1852 /*
1853 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
1854 * we force this session to fall back to session
1855 * recovery.
1856 */
1857 cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
1858 cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
1859
1860 goto check_stop;
1861 case PYX_TRANSPORT_LU_COMM_FAILURE:
1862 case PYX_TRANSPORT_ILLEGAL_REQUEST:
1863 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1864 break;
1865 case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
1866 cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
1867 break;
1868 case PYX_TRANSPORT_WRITE_PROTECTED:
1869 cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
1870 break; 1826 break;
1871 case PYX_TRANSPORT_RESERVATION_CONFLICT: 1827 case TCM_RESERVATION_CONFLICT:
1872 /* 1828 /*
1873 * No SENSE Data payload for this case, set SCSI Status 1829 * No SENSE Data payload for this case, set SCSI Status
1874 * and queue the response to $FABRIC_MOD. 1830 * and queue the response to $FABRIC_MOD.
@@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
1893 if (ret == -EAGAIN || ret == -ENOMEM) 1849 if (ret == -EAGAIN || ret == -ENOMEM)
1894 goto queue_full; 1850 goto queue_full;
1895 goto check_stop; 1851 goto check_stop;
1896 case PYX_TRANSPORT_USE_SENSE_REASON:
1897 /*
1898 * struct se_cmd->scsi_sense_reason already set
1899 */
1900 break;
1901 default: 1852 default:
1902 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1853 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1903 cmd->t_task_cdb[0], 1854 cmd->t_task_cdb[0], cmd->scsi_sense_reason);
1904 cmd->transport_error_status);
1905 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1855 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1906 break; 1856 break;
1907 } 1857 }
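
With the translation table gone, transport_generic_request_failure() keys directly off cmd->scsi_sense_reason: every reason except a reservation conflict falls through to a CHECK CONDITION built from that value. A trimmed sketch of the shape (example_fail_response() is illustrative; error handling and the UA interlock detail shown above are omitted):

    static void example_fail_response(struct se_cmd *cmd)
    {
            switch (cmd->scsi_sense_reason) {
            case TCM_RESERVATION_CONFLICT:
                    /* status-only response, no sense payload */
                    cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
                    cmd->se_tfo->queue_status(cmd);
                    break;
            default:
                    /* the sense reason was stored at the point of failure */
                    transport_send_check_condition_and_sense(cmd,
                                    cmd->scsi_sense_reason, 0);
                    break;
            }
    }
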
@@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
1912 * transport_send_check_condition_and_sense() after handling 1862 * transport_send_check_condition_and_sense() after handling
1913 * possible unsolicited write data payloads. 1863 * possible unsolicited write data payloads.
1914 */ 1864 */
1915 if (!sc && !cmd->se_tfo->new_cmd_map) 1865 ret = transport_send_check_condition_and_sense(cmd,
1916 transport_new_cmd_failure(cmd); 1866 cmd->scsi_sense_reason, 0);
1917 else { 1867 if (ret == -EAGAIN || ret == -ENOMEM)
1918 ret = transport_send_check_condition_and_sense(cmd, 1868 goto queue_full;
1919 cmd->scsi_sense_reason, 0);
1920 if (ret == -EAGAIN || ret == -ENOMEM)
1921 goto queue_full;
1922 }
1923 1869
1924check_stop: 1870check_stop:
1925 transport_lun_remove_cmd(cmd); 1871 transport_lun_remove_cmd(cmd);
@@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2002 * to allow the passed struct se_cmd list of tasks to the front of the list. 1948 * to allow the passed struct se_cmd list of tasks to the front of the list.
2003 */ 1949 */
2004 if (cmd->sam_task_attr == MSG_HEAD_TAG) { 1950 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
2005 atomic_inc(&cmd->se_dev->dev_hoq_count);
2006 smp_mb__after_atomic_inc();
2007 pr_debug("Added HEAD_OF_QUEUE for CDB:" 1951 pr_debug("Added HEAD_OF_QUEUE for CDB:"
2008 " 0x%02x, se_ordered_id: %u\n", 1952 " 0x%02x, se_ordered_id: %u\n",
2009 cmd->t_task_cdb[0], 1953 cmd->t_task_cdb[0],
2010 cmd->se_ordered_id); 1954 cmd->se_ordered_id);
2011 return 1; 1955 return 1;
2012 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 1956 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
2013 spin_lock(&cmd->se_dev->ordered_cmd_lock);
2014 list_add_tail(&cmd->se_ordered_node,
2015 &cmd->se_dev->ordered_cmd_list);
2016 spin_unlock(&cmd->se_dev->ordered_cmd_lock);
2017
2018 atomic_inc(&cmd->se_dev->dev_ordered_sync); 1957 atomic_inc(&cmd->se_dev->dev_ordered_sync);
2019 smp_mb__after_atomic_inc(); 1958 smp_mb__after_atomic_inc();
2020 1959
@@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
2076{ 2015{
2077 int add_tasks; 2016 int add_tasks;
2078 2017
2079 if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { 2018 if (se_dev_check_online(cmd->se_dev) != 0) {
2080 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; 2019 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2081 transport_generic_request_failure(cmd, 0, 1); 2020 transport_generic_request_failure(cmd);
2082 return 0; 2021 return 0;
2083 } 2022 }
2084 2023
@@ -2163,14 +2102,13 @@ check_depth:
2163 else 2102 else
2164 error = dev->transport->do_task(task); 2103 error = dev->transport->do_task(task);
2165 if (error != 0) { 2104 if (error != 0) {
2166 cmd->transport_error_status = error;
2167 spin_lock_irqsave(&cmd->t_state_lock, flags); 2105 spin_lock_irqsave(&cmd->t_state_lock, flags);
2168 task->task_flags &= ~TF_ACTIVE; 2106 task->task_flags &= ~TF_ACTIVE;
2169 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2107 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2170 atomic_set(&cmd->t_transport_sent, 0); 2108 atomic_set(&cmd->t_transport_sent, 0);
2171 transport_stop_tasks_for_cmd(cmd); 2109 transport_stop_tasks_for_cmd(cmd);
2172 atomic_inc(&dev->depth_left); 2110 atomic_inc(&dev->depth_left);
2173 transport_generic_request_failure(cmd, 0, 1); 2111 transport_generic_request_failure(cmd);
2174 } 2112 }
2175 2113
2176 goto check_depth; 2114 goto check_depth;
@@ -2178,19 +2116,6 @@ check_depth:
2178 return 0; 2116 return 0;
2179} 2117}
2180 2118
2181void transport_new_cmd_failure(struct se_cmd *se_cmd)
2182{
2183 unsigned long flags;
2184 /*
2185 * Any unsolicited data will get dumped for failed command inside of
2186 * the fabric plugin
2187 */
2188 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2189 se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
2190 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2191 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2192}
2193
2194static inline u32 transport_get_sectors_6( 2119static inline u32 transport_get_sectors_6(
2195 unsigned char *cdb, 2120 unsigned char *cdb,
2196 struct se_cmd *cmd, 2121 struct se_cmd *cmd,
@@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
2213 2138
2214 /* 2139 /*
2215 * Everything else assume TYPE_DISK Sector CDB location. 2140 * Everything else assume TYPE_DISK Sector CDB location.
2216 * Use 8-bit sector value. 2141 * Use 8-bit sector value. SBC-3 says:
2142 *
2143 * A TRANSFER LENGTH field set to zero specifies that 256
2144 * logical blocks shall be written. Any other value
2145 * specifies the number of logical blocks that shall be
2146 * written.
2217 */ 2147 */
2218type_disk: 2148type_disk:
2219 return (u32)cdb[4]; 2149 return cdb[4] ? : 256;
2220} 2150}
2221 2151
2222static inline u32 transport_get_sectors_10( 2152static inline u32 transport_get_sectors_10(
@@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2460 return -1; 2390 return -1;
2461} 2391}
2462 2392
2463static int
2464transport_handle_reservation_conflict(struct se_cmd *cmd)
2465{
2466 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2467 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2468 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2469 /*
2470 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2471 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2472 * CONFLICT STATUS.
2473 *
2474 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2475 */
2476 if (cmd->se_sess &&
2477 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
2478 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
2479 cmd->orig_fe_lun, 0x2C,
2480 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2481 return -EINVAL;
2482}
2483
2484static inline long long transport_dev_end_lba(struct se_device *dev) 2393static inline long long transport_dev_end_lba(struct se_device *dev)
2485{ 2394{
2486 return dev->transport->get_blocks(dev) + 1; 2395 return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
2595 */ 2504 */
2596 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { 2505 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2597 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( 2506 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
2598 cmd, cdb, pr_reg_type) != 0) 2507 cmd, cdb, pr_reg_type) != 0) {
2599 return transport_handle_reservation_conflict(cmd); 2508 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2509 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2510 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2511 return -EBUSY;
2512 }
2600 /* 2513 /*
2601 * This means the CDB is allowed for the SCSI Initiator port 2514 * This means the CDB is allowed for the SCSI Initiator port
2602 * when said port is *NOT* holding the legacy SPC-2 or 2515 * when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
2658 goto out_unsupported_cdb; 2571 goto out_unsupported_cdb;
2659 size = transport_get_size(sectors, cdb, cmd); 2572 size = transport_get_size(sectors, cdb, cmd);
2660 cmd->t_task_lba = transport_lba_32(cdb); 2573 cmd->t_task_lba = transport_lba_32(cdb);
2661 cmd->t_tasks_fua = (cdb[1] & 0x8); 2574 if (cdb[1] & 0x8)
2575 cmd->se_cmd_flags |= SCF_FUA;
2662 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2576 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2663 break; 2577 break;
2664 case WRITE_12: 2578 case WRITE_12:
@@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
2667 goto out_unsupported_cdb; 2581 goto out_unsupported_cdb;
2668 size = transport_get_size(sectors, cdb, cmd); 2582 size = transport_get_size(sectors, cdb, cmd);
2669 cmd->t_task_lba = transport_lba_32(cdb); 2583 cmd->t_task_lba = transport_lba_32(cdb);
2670 cmd->t_tasks_fua = (cdb[1] & 0x8); 2584 if (cdb[1] & 0x8)
2585 cmd->se_cmd_flags |= SCF_FUA;
2671 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2586 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2672 break; 2587 break;
2673 case WRITE_16: 2588 case WRITE_16:
@@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
2676 goto out_unsupported_cdb; 2591 goto out_unsupported_cdb;
2677 size = transport_get_size(sectors, cdb, cmd); 2592 size = transport_get_size(sectors, cdb, cmd);
2678 cmd->t_task_lba = transport_lba_64(cdb); 2593 cmd->t_task_lba = transport_lba_64(cdb);
2679 cmd->t_tasks_fua = (cdb[1] & 0x8); 2594 if (cdb[1] & 0x8)
2595 cmd->se_cmd_flags |= SCF_FUA;
2680 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2596 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2681 break; 2597 break;
2682 case XDWRITEREAD_10: 2598 case XDWRITEREAD_10:
2683 if ((cmd->data_direction != DMA_TO_DEVICE) || 2599 if ((cmd->data_direction != DMA_TO_DEVICE) ||
2684 !(cmd->t_tasks_bidi)) 2600 !(cmd->se_cmd_flags & SCF_BIDI))
2685 goto out_invalid_cdb_field; 2601 goto out_invalid_cdb_field;
2686 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2602 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2687 if (sector_ret) 2603 if (sector_ret)
@@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
2700 * Setup BIDI XOR callback to be run after I/O completion. 2616 * Setup BIDI XOR callback to be run after I/O completion.
2701 */ 2617 */
2702 cmd->transport_complete_callback = &transport_xor_callback; 2618 cmd->transport_complete_callback = &transport_xor_callback;
2703 cmd->t_tasks_fua = (cdb[1] & 0x8); 2619 if (cdb[1] & 0x8)
2620 cmd->se_cmd_flags |= SCF_FUA;
2704 break; 2621 break;
2705 case VARIABLE_LENGTH_CMD: 2622 case VARIABLE_LENGTH_CMD:
2706 service_action = get_unaligned_be16(&cdb[8]); 2623 service_action = get_unaligned_be16(&cdb[8]);
@@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
2728 * completion. 2645 * completion.
2729 */ 2646 */
2730 cmd->transport_complete_callback = &transport_xor_callback; 2647 cmd->transport_complete_callback = &transport_xor_callback;
2731 cmd->t_tasks_fua = (cdb[10] & 0x8); 2648 if (cdb[10] & 0x8)
2649 cmd->se_cmd_flags |= SCF_FUA;
2732 break; 2650 break;
2733 case WRITE_SAME_32: 2651 case WRITE_SAME_32:
2734 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); 2652 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
3171 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 3089 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3172 cmd->se_ordered_id); 3090 cmd->se_ordered_id);
3173 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { 3091 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3174 atomic_dec(&dev->dev_hoq_count);
3175 smp_mb__after_atomic_dec();
3176 dev->dev_cur_ordered_id++; 3092 dev->dev_cur_ordered_id++;
3177 pr_debug("Incremented dev_cur_ordered_id: %u for" 3093 pr_debug("Incremented dev_cur_ordered_id: %u for"
3178 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 3094 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3179 cmd->se_ordered_id); 3095 cmd->se_ordered_id);
3180 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 3096 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3181 spin_lock(&dev->ordered_cmd_lock);
3182 list_del(&cmd->se_ordered_node);
3183 atomic_dec(&dev->dev_ordered_sync); 3097 atomic_dec(&dev->dev_ordered_sync);
3184 smp_mb__after_atomic_dec(); 3098 smp_mb__after_atomic_dec();
3185 spin_unlock(&dev->ordered_cmd_lock);
3186 3099
3187 dev->dev_cur_ordered_id++; 3100 dev->dev_cur_ordered_id++;
3188 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 3101 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
3495 3408
3496 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || 3409 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3497 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { 3410 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
3411 /*
3412 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
3413 * scatterlists already have been set to follow what the fabric
3414 * passes for the original expected data transfer length.
3415 */
3416 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
3417 pr_warn("Rejecting SCSI DATA overflow for fabric using"
3418 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
3419 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3420 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3421 return -EINVAL;
3422 }
3498 3423
3499 cmd->t_data_sg = sgl; 3424 cmd->t_data_sg = sgl;
3500 cmd->t_data_nents = sgl_count; 3425 cmd->t_data_nents = sgl_count;
@@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
3813 cmd->data_length) { 3738 cmd->data_length) {
3814 ret = transport_generic_get_mem(cmd); 3739 ret = transport_generic_get_mem(cmd);
3815 if (ret < 0) 3740 if (ret < 0)
3816 return ret; 3741 goto out_fail;
3817 } 3742 }
3818 3743
3819 /* 3744 /*
@@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
3842 task_cdbs = transport_allocate_control_task(cmd); 3767 task_cdbs = transport_allocate_control_task(cmd);
3843 } 3768 }
3844 3769
3845 if (task_cdbs <= 0) 3770 if (task_cdbs < 0)
3846 goto out_fail; 3771 goto out_fail;
3772 else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
3773 cmd->t_state = TRANSPORT_COMPLETE;
3774 atomic_set(&cmd->t_transport_active, 1);
3775 INIT_WORK(&cmd->work, target_complete_ok_work);
3776 queue_work(target_completion_wq, &cmd->work);
3777 return 0;
3778 }
3847 3779
3848 if (set_counts) { 3780 if (set_counts) {
3849 atomic_inc(&cmd->t_fe_count); 3781 atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
3929 else if (ret < 0) 3861 else if (ret < 0)
3930 return ret; 3862 return ret;
3931 3863
3932 return PYX_TRANSPORT_WRITE_PENDING; 3864 return 1;
3933 3865
3934queue_full: 3866queue_full:
3935 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 3867 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
4602 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 4534 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
4603 atomic_inc(&cmd->t_transport_aborted); 4535 atomic_inc(&cmd->t_transport_aborted);
4604 smp_mb__after_atomic_inc(); 4536 smp_mb__after_atomic_inc();
4605 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4606 transport_new_cmd_failure(cmd);
4607 return;
4608 } 4537 }
4609 } 4538 }
4610 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 4539 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
4670 struct se_cmd *cmd; 4599 struct se_cmd *cmd;
4671 struct se_device *dev = (struct se_device *) param; 4600 struct se_device *dev = (struct se_device *) param;
4672 4601
4673 set_user_nice(current, -20);
4674
4675 while (!kthread_should_stop()) { 4602 while (!kthread_should_stop()) {
4676 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, 4603 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
4677 atomic_read(&dev->dev_queue_obj.queue_cnt) || 4604 atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@@ -4698,18 +4625,13 @@ get_cmd:
4698 } 4625 }
4699 ret = cmd->se_tfo->new_cmd_map(cmd); 4626 ret = cmd->se_tfo->new_cmd_map(cmd);
4700 if (ret < 0) { 4627 if (ret < 0) {
4701 cmd->transport_error_status = ret; 4628 transport_generic_request_failure(cmd);
4702 transport_generic_request_failure(cmd,
4703 0, (cmd->data_direction !=
4704 DMA_TO_DEVICE));
4705 break; 4629 break;
4706 } 4630 }
4707 ret = transport_generic_new_cmd(cmd); 4631 ret = transport_generic_new_cmd(cmd);
4708 if (ret < 0) { 4632 if (ret < 0) {
4709 cmd->transport_error_status = ret; 4633 transport_generic_request_failure(cmd);
4710 transport_generic_request_failure(cmd, 4634 break;
4711 0, (cmd->data_direction !=
4712 DMA_TO_DEVICE));
4713 } 4635 }
4714 break; 4636 break;
4715 case TRANSPORT_PROCESS_WRITE: 4637 case TRANSPORT_PROCESS_WRITE:
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 4fac37c4c615..71fc9cea5dc9 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
200 lport = ep->lp; 200 lport = ep->lp;
201 fp = fc_frame_alloc(lport, sizeof(*txrdy)); 201 fp = fc_frame_alloc(lport, sizeof(*txrdy));
202 if (!fp) 202 if (!fp)
203 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 203 return -ENOMEM; /* Signal QUEUE_FULL */
204 204
205 txrdy = fc_frame_payload_get(fp, sizeof(*txrdy)); 205 txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
206 memset(txrdy, 0, sizeof(*txrdy)); 206 memset(txrdy, 0, sizeof(*txrdy));
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 5f770412ca40..9402b7387cac 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
436 struct ft_lport_acl *lacl = container_of(wwn, 436 struct ft_lport_acl *lacl = container_of(wwn,
437 struct ft_lport_acl, fc_lport_wwn); 437 struct ft_lport_acl, fc_lport_wwn);
438 438
439 pr_debug("del lport %s\n", 439 pr_debug("del lport %s\n", lacl->name);
440 config_item_name(&wwn->wwn_group.cg_item));
441 mutex_lock(&ft_lport_lock); 440 mutex_lock(&ft_lport_lock);
442 list_del(&lacl->list); 441 list_del(&lacl->list);
443 mutex_unlock(&ft_lport_lock); 442 mutex_unlock(&ft_lport_lock);
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 435f6facbc23..44fbebab5075 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -46,6 +46,7 @@ static inline char __dcc_getchar(void)
46 46
47 asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg" 47 asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
48 : "=r" (__c)); 48 : "=r" (__c));
49 isb();
49 50
50 return __c; 51 return __c;
51} 52}
@@ -55,6 +56,7 @@ static inline void __dcc_putchar(char c)
55 asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char" 56 asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
56 : /* no output register */ 57 : /* no output register */
57 : "r" (c)); 58 : "r" (c));
59 isb();
58} 60}
59 61
60static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count) 62static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 4cb0d0a3e57b..fc7bbba585ce 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -66,14 +66,16 @@
66static int debug; 66static int debug;
67module_param(debug, int, 0600); 67module_param(debug, int, 0600);
68 68
69#define T1 (HZ/10) 69/* Defaults: these are from the specification */
70#define T2 (HZ/3) 70
71#define N2 3 71#define T1 10 /* 100mS */
72#define T2 34 /* 333mS */
73#define N2 3 /* Retry 3 times */
72 74
73/* Use long timers for testing at low speed with debug on */ 75/* Use long timers for testing at low speed with debug on */
74#ifdef DEBUG_TIMING 76#ifdef DEBUG_TIMING
75#define T1 HZ 77#define T1 100
76#define T2 (2 * HZ) 78#define T2 200
77#endif 79#endif
78 80
79/* 81/*
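
T1 and T2 above are no longer jiffy counts but hundredths of a second, matching the multiplexer-specification defaults named in the new comment. Code that arms a timer from these values presumably converts back to jiffies at that point; a minimal sketch of the conversion (the helper name is illustrative, not taken from the driver):

    #include <linux/jiffies.h>

    /* turn a T1/T2 value in 1/100 s units into an absolute jiffies expiry */
    static inline unsigned long gsm_timeout_to_jiffies(int hundredths)
    {
            return jiffies + hundredths * HZ / 100;
    }
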
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 5f479dada6f2..925a1e547a83 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1560,7 +1560,7 @@ config SERIAL_IFX6X60
1560 Support for the IFX6x60 modem devices on Intel MID platforms. 1560 Support for the IFX6x60 modem devices on Intel MID platforms.
1561 1561
1562config SERIAL_PCH_UART 1562config SERIAL_PCH_UART
1563 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) UART" 1563 tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART"
1564 depends on PCI 1564 depends on PCI
1565 select SERIAL_CORE 1565 select SERIAL_CORE
1566 help 1566 help
@@ -1568,12 +1568,12 @@ config SERIAL_PCH_UART
1568 which is an IOH(Input/Output Hub) for x86 embedded processor. 1568 which is an IOH(Input/Output Hub) for x86 embedded processor.
1569 Enabling PCH_DMA, this PCH UART works as DMA mode. 1569 Enabling PCH_DMA, this PCH UART works as DMA mode.
1570 1570
1571 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 1571 This driver also can be used for LAPIS Semiconductor IOH(Input/
1572 Output Hub), ML7213 and ML7223. 1572 Output Hub), ML7213, ML7223 and ML7831.
1573 ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is 1573 ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
1574 for MP(Media Phone) use. 1574 for MP(Media Phone) use and ML7831 IOH is for general purpose use.
1575 ML7213/ML7223 is companion chip for Intel Atom E6xx series. 1575 ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
1576 ML7213/ML7223 is completely compatible for Intel EG20T PCH. 1576 ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
1577 1577
1578config SERIAL_MSM_SMD 1578config SERIAL_MSM_SMD
1579 bool "Enable tty device interface for some SMD ports" 1579 bool "Enable tty device interface for some SMD ports"
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 4a0f86fa1e90..4c823f341d98 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -228,7 +228,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
228 if (rs485conf->flags & SER_RS485_ENABLED) { 228 if (rs485conf->flags & SER_RS485_ENABLED) {
229 dev_dbg(port->dev, "Setting UART to RS485\n"); 229 dev_dbg(port->dev, "Setting UART to RS485\n");
230 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 230 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
231 if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) 231 if ((rs485conf->delay_rts_after_send) > 0)
232 UART_PUT_TTGR(port, rs485conf->delay_rts_after_send); 232 UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
233 mode |= ATMEL_US_USMODE_RS485; 233 mode |= ATMEL_US_USMODE_RS485;
234 } else { 234 } else {
@@ -304,7 +304,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
304 304
305 if (atmel_port->rs485.flags & SER_RS485_ENABLED) { 305 if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
306 dev_dbg(port->dev, "Setting UART to RS485\n"); 306 dev_dbg(port->dev, "Setting UART to RS485\n");
307 if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND) 307 if ((atmel_port->rs485.delay_rts_after_send) > 0)
308 UART_PUT_TTGR(port, 308 UART_PUT_TTGR(port,
309 atmel_port->rs485.delay_rts_after_send); 309 atmel_port->rs485.delay_rts_after_send);
310 mode |= ATMEL_US_USMODE_RS485; 310 mode |= ATMEL_US_USMODE_RS485;
@@ -1228,7 +1228,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
1228 1228
1229 if (atmel_port->rs485.flags & SER_RS485_ENABLED) { 1229 if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
1230 dev_dbg(port->dev, "Setting UART to RS485\n"); 1230 dev_dbg(port->dev, "Setting UART to RS485\n");
1231 if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND) 1231 if ((atmel_port->rs485.delay_rts_after_send) > 0)
1232 UART_PUT_TTGR(port, 1232 UART_PUT_TTGR(port,
1233 atmel_port->rs485.delay_rts_after_send); 1233 atmel_port->rs485.delay_rts_after_send);
1234 mode |= ATMEL_US_USMODE_RS485; 1234 mode |= ATMEL_US_USMODE_RS485;
@@ -1447,16 +1447,6 @@ static void __devinit atmel_of_init_port(struct atmel_uart_port *atmel_port,
1447 rs485conf->delay_rts_after_send = rs485_delay[1]; 1447 rs485conf->delay_rts_after_send = rs485_delay[1];
1448 rs485conf->flags = 0; 1448 rs485conf->flags = 0;
1449 1449
1450 if (rs485conf->delay_rts_before_send == 0 &&
1451 rs485conf->delay_rts_after_send == 0) {
1452 rs485conf->flags |= SER_RS485_RTS_ON_SEND;
1453 } else {
1454 if (rs485conf->delay_rts_before_send)
1455 rs485conf->flags |= SER_RS485_RTS_BEFORE_SEND;
1456 if (rs485conf->delay_rts_after_send)
1457 rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
1458 }
1459
1460 if (of_get_property(np, "rs485-rx-during-tx", NULL)) 1450 if (of_get_property(np, "rs485-rx-during-tx", NULL))
1461 rs485conf->flags |= SER_RS485_RX_DURING_TX; 1451 rs485conf->flags |= SER_RS485_RX_DURING_TX;
1462 1452
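The delay_rts_after_send value that the atmel hunks above now key the turnaround timer on comes from the standard struct serial_rs485 interface in <linux/serial.h>. As a rough illustration only (the device path, delay value and helper name below are examples, not part of the patch), user space hands these delays to the driver through the TIOCSRS485 ioctl:

    /* Hypothetical user-space sketch: enable RS485 with a 2 ms RTS
     * guard time after transmission; error handling kept minimal. */
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/serial.h>

    int example_enable_rs485(const char *dev)
    {
        struct serial_rs485 conf;
        int fd = open(dev, O_RDWR | O_NOCTTY);

        if (fd < 0)
            return -1;
        memset(&conf, 0, sizeof(conf));
        conf.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
        conf.delay_rts_after_send = 2;    /* guard time, in milliseconds */
        if (ioctl(fd, TIOCSRS485, &conf) < 0) {
            close(fd);
            return -1;
        }
        return fd;
    }
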
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index b7435043f2fe..1dfba7b779c8 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -3234,9 +3234,8 @@ rs_write(struct tty_struct *tty,
3234 e100_disable_rx(info); 3234 e100_disable_rx(info);
3235 e100_enable_rx_irq(info); 3235 e100_enable_rx_irq(info);
3236#endif 3236#endif
3237 if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) && 3237 if (info->rs485.delay_rts_before_send > 0)
3238 (info->rs485.delay_rts_before_send > 0)) 3238 msleep(info->rs485.delay_rts_before_send);
3239 msleep(info->rs485.delay_rts_before_send);
3240 } 3239 }
3241#endif /* CONFIG_ETRAX_RS485 */ 3240#endif /* CONFIG_ETRAX_RS485 */
3242 3241
@@ -3693,10 +3692,6 @@ rs_ioctl(struct tty_struct *tty,
3693 3692
3694 rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send; 3693 rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send;
3695 rs485data.flags = 0; 3694 rs485data.flags = 0;
3696 if (rs485data.delay_rts_before_send != 0)
3697 rs485data.flags |= SER_RS485_RTS_BEFORE_SEND;
3698 else
3699 rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
3700 3695
3701 if (rs485ctrl.enabled) 3696 if (rs485ctrl.enabled)
3702 rs485data.flags |= SER_RS485_ENABLED; 3697 rs485data.flags |= SER_RS485_ENABLED;
@@ -4531,7 +4526,6 @@ static int __init rs_init(void)
4531 /* Set sane defaults */ 4526 /* Set sane defaults */
4532 info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND); 4527 info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
4533 info->rs485.flags |= SER_RS485_RTS_AFTER_SEND; 4528 info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
4534 info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
4535 info->rs485.delay_rts_before_send = 0; 4529 info->rs485.delay_rts_before_send = 0;
4536 info->rs485.flags &= ~(SER_RS485_ENABLED); 4530 info->rs485.flags &= ~(SER_RS485_ENABLED);
4537#endif 4531#endif
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index 286c386d9c46..e272d3919c67 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -884,7 +884,6 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
884{ 884{
885 struct uart_hsu_port *up = 885 struct uart_hsu_port *up =
886 container_of(port, struct uart_hsu_port, port); 886 container_of(port, struct uart_hsu_port, port);
887 struct tty_struct *tty = port->state->port.tty;
888 unsigned char cval, fcr = 0; 887 unsigned char cval, fcr = 0;
889 unsigned long flags; 888 unsigned long flags;
890 unsigned int baud, quot; 889 unsigned int baud, quot;
@@ -907,8 +906,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
907 } 906 }
908 907
909 /* CMSPAR isn't supported by this driver */ 908 /* CMSPAR isn't supported by this driver */
910 if (tty) 909 termios->c_cflag &= ~CMSPAR;
911 tty->termios->c_cflag &= ~CMSPAR;
912 910
913 if (termios->c_cflag & CSTOPB) 911 if (termios->c_cflag & CSTOPB)
914 cval |= UART_LCR_STOP; 912 cval |= UART_LCR_STOP;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 21febef926aa..d6aba8c087e4 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1,5 +1,5 @@
1/* 1/*
2 *Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. 2 *Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 * 3 *
4 *This program is free software; you can redistribute it and/or modify 4 *This program is free software; you can redistribute it and/or modify
5 *it under the terms of the GNU General Public License as published by 5 *it under the terms of the GNU General Public License as published by
@@ -46,8 +46,8 @@ enum {
46 46
47/* Set the max number of UART port 47/* Set the max number of UART port
48 * Intel EG20T PCH: 4 port 48 * Intel EG20T PCH: 4 port
49 * OKI SEMICONDUCTOR ML7213 IOH: 3 port 49 * LAPIS Semiconductor ML7213 IOH: 3 port
50 * OKI SEMICONDUCTOR ML7223 IOH: 2 port 50 * LAPIS Semiconductor ML7223 IOH: 2 port
51*/ 51*/
52#define PCH_UART_NR 4 52#define PCH_UART_NR 4
53 53
@@ -258,6 +258,8 @@ enum pch_uart_num_t {
258 pch_ml7213_uart2, 258 pch_ml7213_uart2,
259 pch_ml7223_uart0, 259 pch_ml7223_uart0,
260 pch_ml7223_uart1, 260 pch_ml7223_uart1,
261 pch_ml7831_uart0,
262 pch_ml7831_uart1,
261}; 263};
262 264
263static struct pch_uart_driver_data drv_dat[] = { 265static struct pch_uart_driver_data drv_dat[] = {
@@ -270,6 +272,8 @@ static struct pch_uart_driver_data drv_dat[] = {
270 [pch_ml7213_uart2] = {PCH_UART_2LINE, 2}, 272 [pch_ml7213_uart2] = {PCH_UART_2LINE, 2},
271 [pch_ml7223_uart0] = {PCH_UART_8LINE, 0}, 273 [pch_ml7223_uart0] = {PCH_UART_8LINE, 0},
272 [pch_ml7223_uart1] = {PCH_UART_2LINE, 1}, 274 [pch_ml7223_uart1] = {PCH_UART_2LINE, 1},
275 [pch_ml7831_uart0] = {PCH_UART_8LINE, 0},
276 [pch_ml7831_uart1] = {PCH_UART_2LINE, 1},
273}; 277};
274 278
275static unsigned int default_baud = 9600; 279static unsigned int default_baud = 9600;
@@ -628,6 +632,7 @@ static void pch_request_dma(struct uart_port *port)
628 dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n", 632 dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n",
629 __func__); 633 __func__);
630 dma_release_channel(priv->chan_tx); 634 dma_release_channel(priv->chan_tx);
635 priv->chan_tx = NULL;
631 return; 636 return;
632 } 637 }
633 638
@@ -1215,8 +1220,7 @@ static void pch_uart_shutdown(struct uart_port *port)
1215 dev_err(priv->port.dev, 1220 dev_err(priv->port.dev,
1216 "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret); 1221 "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret);
1217 1222
1218 if (priv->use_dma_flag) 1223 pch_free_dma(port);
1219 pch_free_dma(port);
1220 1224
1221 free_irq(priv->port.irq, priv); 1225 free_irq(priv->port.irq, priv);
1222} 1226}
@@ -1280,6 +1284,7 @@ static void pch_uart_set_termios(struct uart_port *port,
1280 if (rtn) 1284 if (rtn)
1281 goto out; 1285 goto out;
1282 1286
1287 pch_uart_set_mctrl(&priv->port, priv->port.mctrl);
1283 /* Don't rewrite B0 */ 1288 /* Don't rewrite B0 */
1284 if (tty_termios_baud_rate(termios)) 1289 if (tty_termios_baud_rate(termios))
1285 tty_termios_encode_baud_rate(termios, baud, baud); 1290 tty_termios_encode_baud_rate(termios, baud, baud);
@@ -1552,6 +1557,10 @@ static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = {
1552 .driver_data = pch_ml7223_uart0}, 1557 .driver_data = pch_ml7223_uart0},
1553 {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D), 1558 {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D),
1554 .driver_data = pch_ml7223_uart1}, 1559 .driver_data = pch_ml7223_uart1},
1560 {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8811),
1561 .driver_data = pch_ml7831_uart0},
1562 {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8812),
1563 .driver_data = pch_ml7831_uart1},
1555 {0,}, 1564 {0,},
1556}; 1565};
1557 1566
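The new ML7831 entries above reuse the usual PCI idiom in which driver_data is an index into a per-chip configuration table. The sketch below restates that pattern with generic, hypothetical names (example_chip_cfg, example_cfg_tbl and example_probe are not from the patch):

    #include <linux/pci.h>

    struct example_chip_cfg {
        int lines;    /* number of UART lines exposed by this device */
        int port;     /* board-level port number */
    };

    static const struct example_chip_cfg example_cfg_tbl[] = {
        { 8, 0 },    /* e.g. an 8-line UART0-style function */
        { 2, 1 },    /* e.g. a 2-line UART1-style function */
    };

    static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
        /* driver_data from the matching table entry selects the config */
        const struct example_chip_cfg *cfg = &example_cfg_tbl[id->driver_data];

        dev_info(&pdev->dev, "lines=%d port=%d\n", cfg->lines, cfg->port);
        return 0;
    }
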
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 1945c70539c2..aff9d612dff0 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -207,6 +207,25 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
207 }, 207 },
208 208
209 /* 209 /*
210 * Common SH-2(A) SCIF definitions for ports with FIFO data
211 * count registers.
212 */
213 [SCIx_SH2_SCIF_FIFODATA_REGTYPE] = {
214 [SCSMR] = { 0x00, 16 },
215 [SCBRR] = { 0x04, 8 },
216 [SCSCR] = { 0x08, 16 },
217 [SCxTDR] = { 0x0c, 8 },
218 [SCxSR] = { 0x10, 16 },
219 [SCxRDR] = { 0x14, 8 },
220 [SCFCR] = { 0x18, 16 },
221 [SCFDR] = { 0x1c, 16 },
222 [SCTFDR] = sci_reg_invalid,
223 [SCRFDR] = sci_reg_invalid,
224 [SCSPTR] = { 0x20, 16 },
225 [SCLSR] = { 0x24, 16 },
226 },
227
228 /*
210 * Common SH-3 SCIF definitions. 229 * Common SH-3 SCIF definitions.
211 */ 230 */
212 [SCIx_SH3_SCIF_REGTYPE] = { 231 [SCIx_SH3_SCIF_REGTYPE] = {
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 512c49f98e85..8e0924f55446 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -36,6 +36,7 @@
36 36
37#include <linux/kmod.h> 37#include <linux/kmod.h>
38#include <linux/nsproxy.h> 38#include <linux/nsproxy.h>
39#include <linux/ratelimit.h>
39 40
40/* 41/*
41 * This guards the refcounted line discipline lists. The lock 42 * This guards the refcounted line discipline lists. The lock
@@ -547,15 +548,16 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
547/** 548/**
548 * tty_ldisc_wait_idle - wait for the ldisc to become idle 549 * tty_ldisc_wait_idle - wait for the ldisc to become idle
549 * @tty: tty to wait for 550 * @tty: tty to wait for
551 * @timeout: for how long to wait at most
550 * 552 *
551 * Wait for the line discipline to become idle. The discipline must 553 * Wait for the line discipline to become idle. The discipline must
552 * have been halted for this to guarantee it remains idle. 554 * have been halted for this to guarantee it remains idle.
553 */ 555 */
554static int tty_ldisc_wait_idle(struct tty_struct *tty) 556static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout)
555{ 557{
556 int ret; 558 long ret;
557 ret = wait_event_timeout(tty_ldisc_idle, 559 ret = wait_event_timeout(tty_ldisc_idle,
558 atomic_read(&tty->ldisc->users) == 1, 5 * HZ); 560 atomic_read(&tty->ldisc->users) == 1, timeout);
559 if (ret < 0) 561 if (ret < 0)
560 return ret; 562 return ret;
561 return ret > 0 ? 0 : -EBUSY; 563 return ret > 0 ? 0 : -EBUSY;
@@ -665,7 +667,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
665 667
666 tty_ldisc_flush_works(tty); 668 tty_ldisc_flush_works(tty);
667 669
668 retval = tty_ldisc_wait_idle(tty); 670 retval = tty_ldisc_wait_idle(tty, 5 * HZ);
669 671
670 tty_lock(); 672 tty_lock();
671 mutex_lock(&tty->ldisc_mutex); 673 mutex_lock(&tty->ldisc_mutex);
@@ -762,8 +764,6 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
762 if (IS_ERR(ld)) 764 if (IS_ERR(ld))
763 return -1; 765 return -1;
764 766
765 WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
766
767 tty_ldisc_close(tty, tty->ldisc); 767 tty_ldisc_close(tty, tty->ldisc);
768 tty_ldisc_put(tty->ldisc); 768 tty_ldisc_put(tty->ldisc);
769 tty->ldisc = NULL; 769 tty->ldisc = NULL;
@@ -838,7 +838,7 @@ void tty_ldisc_hangup(struct tty_struct *tty)
838 tty_unlock(); 838 tty_unlock();
839 cancel_work_sync(&tty->buf.work); 839 cancel_work_sync(&tty->buf.work);
840 mutex_unlock(&tty->ldisc_mutex); 840 mutex_unlock(&tty->ldisc_mutex);
841 841retry:
842 tty_lock(); 842 tty_lock();
843 mutex_lock(&tty->ldisc_mutex); 843 mutex_lock(&tty->ldisc_mutex);
844 844
@@ -847,6 +847,22 @@ void tty_ldisc_hangup(struct tty_struct *tty)
847 it means auditing a lot of other paths so this is 847 it means auditing a lot of other paths so this is
848 a FIXME */ 848 a FIXME */
849 if (tty->ldisc) { /* Not yet closed */ 849 if (tty->ldisc) { /* Not yet closed */
850 if (atomic_read(&tty->ldisc->users) != 1) {
851 char cur_n[TASK_COMM_LEN], tty_n[64];
852 long timeout = 3 * HZ;
853 tty_unlock();
854
855 while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
856 timeout = MAX_SCHEDULE_TIMEOUT;
857 printk_ratelimited(KERN_WARNING
858 "%s: waiting (%s) for %s took too long, but we keep waiting...\n",
859 __func__, get_task_comm(cur_n, current),
860 tty_name(tty, tty_n));
861 }
862 mutex_unlock(&tty->ldisc_mutex);
863 goto retry;
864 }
865
850 if (reset == 0) { 866 if (reset == 0) {
851 867
852 if (!tty_ldisc_reinit(tty, tty->termios->c_line)) 868 if (!tty_ldisc_reinit(tty, tty->termios->c_line))
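The reworked tty_ldisc_wait_idle() above is an instance of the common wait_event_timeout() pattern with a caller-chosen timeout: the macro returns the remaining jiffies, or 0 if the condition never became true within the timeout. A stripped-down sketch with hypothetical names:

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/atomic.h>
    #include <linux/errno.h>

    static DECLARE_WAIT_QUEUE_HEAD(example_idle_wq);
    static atomic_t example_users = ATOMIC_INIT(1);

    /* Wait until we are the only user, for at most 'timeout' jiffies. */
    static int example_wait_idle(long timeout)
    {
        long ret = wait_event_timeout(example_idle_wq,
                                      atomic_read(&example_users) == 1,
                                      timeout);
        return ret > 0 ? 0 : -EBUSY;
    }
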
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 6960715c5063..a8078d0638fa 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -539,7 +539,6 @@ static void acm_port_down(struct acm *acm)
539{ 539{
540 int i; 540 int i;
541 541
542 mutex_lock(&open_mutex);
543 if (acm->dev) { 542 if (acm->dev) {
544 usb_autopm_get_interface(acm->control); 543 usb_autopm_get_interface(acm->control);
545 acm_set_control(acm, acm->ctrlout = 0); 544 acm_set_control(acm, acm->ctrlout = 0);
@@ -551,14 +550,15 @@ static void acm_port_down(struct acm *acm)
551 acm->control->needs_remote_wakeup = 0; 550 acm->control->needs_remote_wakeup = 0;
552 usb_autopm_put_interface(acm->control); 551 usb_autopm_put_interface(acm->control);
553 } 552 }
554 mutex_unlock(&open_mutex);
555} 553}
556 554
557static void acm_tty_hangup(struct tty_struct *tty) 555static void acm_tty_hangup(struct tty_struct *tty)
558{ 556{
559 struct acm *acm = tty->driver_data; 557 struct acm *acm = tty->driver_data;
560 tty_port_hangup(&acm->port); 558 tty_port_hangup(&acm->port);
559 mutex_lock(&open_mutex);
561 acm_port_down(acm); 560 acm_port_down(acm);
561 mutex_unlock(&open_mutex);
562} 562}
563 563
564static void acm_tty_close(struct tty_struct *tty, struct file *filp) 564static void acm_tty_close(struct tty_struct *tty, struct file *filp)
@@ -569,8 +569,9 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
569 shutdown */ 569 shutdown */
570 if (!acm) 570 if (!acm)
571 return; 571 return;
572
573 mutex_lock(&open_mutex);
572 if (tty_port_close_start(&acm->port, tty, filp) == 0) { 574 if (tty_port_close_start(&acm->port, tty, filp) == 0) {
573 mutex_lock(&open_mutex);
574 if (!acm->dev) { 575 if (!acm->dev) {
575 tty_port_tty_set(&acm->port, NULL); 576 tty_port_tty_set(&acm->port, NULL);
576 acm_tty_unregister(acm); 577 acm_tty_unregister(acm);
@@ -582,6 +583,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
582 acm_port_down(acm); 583 acm_port_down(acm);
583 tty_port_close_end(&acm->port, tty); 584 tty_port_close_end(&acm->port, tty);
584 tty_port_tty_set(&acm->port, NULL); 585 tty_port_tty_set(&acm->port, NULL);
586 mutex_unlock(&open_mutex);
585} 587}
586 588
587static int acm_tty_write(struct tty_struct *tty, 589static int acm_tty_write(struct tty_struct *tty,
@@ -1456,6 +1458,16 @@ static const struct usb_device_id acm_ids[] = {
1456 }, 1458 },
1457 { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */ 1459 { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
1458 }, 1460 },
1461 /* Motorola H24 HSPA module: */
1462 { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
1463 { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
1464 { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
1465 { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
1466 { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
1467 { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
1468 { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
1469 { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
1470
1459 { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */ 1471 { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
1460 .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on 1472 .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
1461 data interface instead of 1473 data interface instead of
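The cdc-acm change above moves open_mutex acquisition out of acm_port_down() and into its callers. The sketch below shows the general shape of that convention with hypothetical names; lockdep_assert_held() is the usual way to document that a helper expects its caller to already hold the lock:

    #include <linux/mutex.h>
    #include <linux/lockdep.h>

    static DEFINE_MUTEX(example_open_mutex);

    /* Must be called with example_open_mutex held. */
    static void example_port_down(void)
    {
        lockdep_assert_held(&example_open_mutex);
        /* ... actual teardown would go here ... */
    }

    static void example_hangup(void)
    {
        mutex_lock(&example_open_mutex);
        example_port_down();
        mutex_unlock(&example_open_mutex);
    }
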
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 96f05b29c9ad..79781461eec9 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -813,6 +813,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
813 USB_PORT_FEAT_C_PORT_LINK_STATE); 813 USB_PORT_FEAT_C_PORT_LINK_STATE);
814 } 814 }
815 815
816 if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
817 hub_is_superspeed(hub->hdev)) {
818 need_debounce_delay = true;
819 clear_port_feature(hub->hdev, port1,
820 USB_PORT_FEAT_C_BH_PORT_RESET);
821 }
816 /* We can forget about a "removed" device when there's a 822 /* We can forget about a "removed" device when there's a
817 * physical disconnect or the connect status changes. 823 * physical disconnect or the connect status changes.
818 */ 824 */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d6a8d8269bfb..ecf12e15a7ef 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -50,15 +50,42 @@ static const struct usb_device_id usb_quirk_list[] = {
50 /* Logitech Webcam B/C500 */ 50 /* Logitech Webcam B/C500 */
51 { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME }, 51 { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME },
52 52
53 /* Logitech Webcam C600 */
54 { USB_DEVICE(0x046d, 0x0808), .driver_info = USB_QUIRK_RESET_RESUME },
55
53 /* Logitech Webcam Pro 9000 */ 56 /* Logitech Webcam Pro 9000 */
54 { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME }, 57 { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME },
55 58
59 /* Logitech Webcam C905 */
60 { USB_DEVICE(0x046d, 0x080a), .driver_info = USB_QUIRK_RESET_RESUME },
61
62 /* Logitech Webcam C210 */
63 { USB_DEVICE(0x046d, 0x0819), .driver_info = USB_QUIRK_RESET_RESUME },
64
65 /* Logitech Webcam C260 */
66 { USB_DEVICE(0x046d, 0x081a), .driver_info = USB_QUIRK_RESET_RESUME },
67
56 /* Logitech Webcam C310 */ 68 /* Logitech Webcam C310 */
57 { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME }, 69 { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME },
58 70
71 /* Logitech Webcam C910 */
72 { USB_DEVICE(0x046d, 0x0821), .driver_info = USB_QUIRK_RESET_RESUME },
73
74 /* Logitech Webcam C160 */
75 { USB_DEVICE(0x046d, 0x0824), .driver_info = USB_QUIRK_RESET_RESUME },
76
59 /* Logitech Webcam C270 */ 77 /* Logitech Webcam C270 */
60 { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, 78 { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
61 79
80 /* Logitech Quickcam Pro 9000 */
81 { USB_DEVICE(0x046d, 0x0990), .driver_info = USB_QUIRK_RESET_RESUME },
82
83 /* Logitech Quickcam E3500 */
84 { USB_DEVICE(0x046d, 0x09a4), .driver_info = USB_QUIRK_RESET_RESUME },
85
86 /* Logitech Quickcam Vision Pro */
87 { USB_DEVICE(0x046d, 0x09a6), .driver_info = USB_QUIRK_RESET_RESUME },
88
62 /* Logitech Harmony 700-series */ 89 /* Logitech Harmony 700-series */
63 { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT }, 90 { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
64 91
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index fa824cfdd2eb..25dbd8614e72 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1284,6 +1284,7 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1284 int ret; 1284 int ret;
1285 1285
1286 dep->endpoint.maxpacket = 1024; 1286 dep->endpoint.maxpacket = 1024;
1287 dep->endpoint.max_streams = 15;
1287 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1288 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1288 list_add_tail(&dep->endpoint.ep_list, 1289 list_add_tail(&dep->endpoint.ep_list,
1289 &dwc->gadget.ep_list); 1290 &dwc->gadget.ep_list);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b21cd376c11a..23a447373c51 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -469,7 +469,7 @@ config USB_LANGWELL
469 gadget drivers to also be dynamically linked. 469 gadget drivers to also be dynamically linked.
470 470
471config USB_EG20T 471config USB_EG20T
472 tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC" 472 tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
473 depends on PCI 473 depends on PCI
474 select USB_GADGET_DUALSPEED 474 select USB_GADGET_DUALSPEED
475 help 475 help
@@ -485,10 +485,11 @@ config USB_EG20T
485 This driver dose not support interrupt transfer or isochronous 485 This driver dose not support interrupt transfer or isochronous
486 transfer modes. 486 transfer modes.
487 487
488 This driver also can be used for OKI SEMICONDUCTOR's ML7213 which is 488 This driver also can be used for LAPIS Semiconductor's ML7213 which is
489 for IVI(In-Vehicle Infotainment) use. 489 for IVI(In-Vehicle Infotainment) use.
490 ML7213 is companion chip for Intel Atom E6xx series. 490 ML7831 is for general purpose use.
491 ML7213 is completely compatible for Intel EG20T PCH. 491 ML7213/ML7831 is companion chip for Intel Atom E6xx series.
492 ML7213/ML7831 is completely compatible for Intel EG20T PCH.
492 493
493config USB_CI13XXX_MSM 494config USB_CI13XXX_MSM
494 tristate "MIPS USB CI13xxx for MSM" 495 tristate "MIPS USB CI13xxx for MSM"
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 4730016d7cd4..45f422ac103f 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver,
1959 u32 tmp; 1959 u32 tmp;
1960 1960
1961 if (!driver || !bind || !driver->setup 1961 if (!driver || !bind || !driver->setup
1962 || driver->speed != USB_SPEED_HIGH) 1962 || driver->speed < USB_SPEED_HIGH)
1963 return -EINVAL; 1963 return -EINVAL;
1964 if (!dev) 1964 if (!dev)
1965 return -ENODEV; 1965 return -ENODEV;
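The relaxed check above (speed < USB_SPEED_HIGH instead of speed != USB_SPEED_HIGH) leans on enum usb_device_speed in <linux/usb/ch9.h> being ordered from slowest to fastest (USB_SPEED_UNKNOWN, LOW, FULL, HIGH, WIRELESS, SUPER), so a super-speed-capable gadget driver is no longer rejected by a high-speed-only controller. A one-line restatement of the idiom (helper name is illustrative):

    #include <linux/types.h>
    #include <linux/usb/ch9.h>

    /* Accept any gadget driver that is at least high-speed capable. */
    static bool example_speed_ok(enum usb_device_speed driver_speed)
    {
        return driver_speed >= USB_SPEED_HIGH;
    }
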
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 4eedfe557154..1fc612914c52 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -122,3 +122,5 @@ static int __init ci13xxx_msm_init(void)
122 return platform_driver_register(&ci13xxx_msm_driver); 122 return platform_driver_register(&ci13xxx_msm_driver);
123} 123}
124module_init(ci13xxx_msm_init); 124module_init(ci13xxx_msm_init);
125
126MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 83428f56253b..9a0c3979ff43 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -71,6 +71,9 @@
71/****************************************************************************** 71/******************************************************************************
72 * DEFINE 72 * DEFINE
73 *****************************************************************************/ 73 *****************************************************************************/
74
75#define DMA_ADDR_INVALID (~(dma_addr_t)0)
76
74/* ctrl register bank access */ 77/* ctrl register bank access */
75static DEFINE_SPINLOCK(udc_lock); 78static DEFINE_SPINLOCK(udc_lock);
76 79
@@ -1434,7 +1437,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
1434 return -EALREADY; 1437 return -EALREADY;
1435 1438
1436 mReq->req.status = -EALREADY; 1439 mReq->req.status = -EALREADY;
1437 if (length && !mReq->req.dma) { 1440 if (length && mReq->req.dma == DMA_ADDR_INVALID) {
1438 mReq->req.dma = \ 1441 mReq->req.dma = \
1439 dma_map_single(mEp->device, mReq->req.buf, 1442 dma_map_single(mEp->device, mReq->req.buf,
1440 length, mEp->dir ? DMA_TO_DEVICE : 1443 length, mEp->dir ? DMA_TO_DEVICE :
@@ -1453,7 +1456,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
1453 dma_unmap_single(mEp->device, mReq->req.dma, 1456 dma_unmap_single(mEp->device, mReq->req.dma,
1454 length, mEp->dir ? DMA_TO_DEVICE : 1457 length, mEp->dir ? DMA_TO_DEVICE :
1455 DMA_FROM_DEVICE); 1458 DMA_FROM_DEVICE);
1456 mReq->req.dma = 0; 1459 mReq->req.dma = DMA_ADDR_INVALID;
1457 mReq->map = 0; 1460 mReq->map = 0;
1458 } 1461 }
1459 return -ENOMEM; 1462 return -ENOMEM;
@@ -1549,7 +1552,7 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
1549 if (mReq->map) { 1552 if (mReq->map) {
1550 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, 1553 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
1551 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1554 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1552 mReq->req.dma = 0; 1555 mReq->req.dma = DMA_ADDR_INVALID;
1553 mReq->map = 0; 1556 mReq->map = 0;
1554 } 1557 }
1555 1558
@@ -1610,7 +1613,6 @@ __acquires(mEp->lock)
1610 * @gadget: gadget 1613 * @gadget: gadget
1611 * 1614 *
1612 * This function returns an error code 1615 * This function returns an error code
1613 * Caller must hold lock
1614 */ 1616 */
1615static int _gadget_stop_activity(struct usb_gadget *gadget) 1617static int _gadget_stop_activity(struct usb_gadget *gadget)
1616{ 1618{
@@ -2189,6 +2191,7 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
2189 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags); 2191 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
2190 if (mReq != NULL) { 2192 if (mReq != NULL) {
2191 INIT_LIST_HEAD(&mReq->queue); 2193 INIT_LIST_HEAD(&mReq->queue);
2194 mReq->req.dma = DMA_ADDR_INVALID;
2192 2195
2193 mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags, 2196 mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
2194 &mReq->dma); 2197 &mReq->dma);
@@ -2328,7 +2331,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2328 if (mReq->map) { 2331 if (mReq->map) {
2329 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, 2332 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
2330 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 2333 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
2331 mReq->req.dma = 0; 2334 mReq->req.dma = DMA_ADDR_INVALID;
2332 mReq->map = 0; 2335 mReq->map = 0;
2333 } 2336 }
2334 req->status = -ECONNRESET; 2337 req->status = -ECONNRESET;
@@ -2500,12 +2503,12 @@ static int ci13xxx_wakeup(struct usb_gadget *_gadget)
2500 spin_lock_irqsave(udc->lock, flags); 2503 spin_lock_irqsave(udc->lock, flags);
2501 if (!udc->remote_wakeup) { 2504 if (!udc->remote_wakeup) {
2502 ret = -EOPNOTSUPP; 2505 ret = -EOPNOTSUPP;
2503 dbg_trace("remote wakeup feature is not enabled\n"); 2506 trace("remote wakeup feature is not enabled\n");
2504 goto out; 2507 goto out;
2505 } 2508 }
2506 if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) { 2509 if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
2507 ret = -EINVAL; 2510 ret = -EINVAL;
2508 dbg_trace("port is not suspended\n"); 2511 trace("port is not suspended\n");
2509 goto out; 2512 goto out;
2510 } 2513 }
2511 hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR); 2514 hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
@@ -2703,7 +2706,9 @@ static int ci13xxx_stop(struct usb_gadget_driver *driver)
2703 if (udc->udc_driver->notify_event) 2706 if (udc->udc_driver->notify_event)
2704 udc->udc_driver->notify_event(udc, 2707 udc->udc_driver->notify_event(udc,
2705 CI13XXX_CONTROLLER_STOPPED_EVENT); 2708 CI13XXX_CONTROLLER_STOPPED_EVENT);
2709 spin_unlock_irqrestore(udc->lock, flags);
2706 _gadget_stop_activity(&udc->gadget); 2710 _gadget_stop_activity(&udc->gadget);
2711 spin_lock_irqsave(udc->lock, flags);
2707 pm_runtime_put(&udc->gadget.dev); 2712 pm_runtime_put(&udc->gadget.dev);
2708 } 2713 }
2709 2714
@@ -2850,7 +2855,7 @@ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
2850 struct ci13xxx *udc; 2855 struct ci13xxx *udc;
2851 int retval = 0; 2856 int retval = 0;
2852 2857
2853 trace("%p, %p, %p", dev, regs, name); 2858 trace("%p, %p, %p", dev, regs, driver->name);
2854 2859
2855 if (dev == NULL || regs == NULL || driver == NULL || 2860 if (dev == NULL || regs == NULL || driver == NULL ||
2856 driver->name == NULL) 2861 driver->name == NULL)
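The DMA_ADDR_INVALID change above follows the usual sentinel idiom: a bus address of 0 can be perfectly valid, so "not mapped yet" is tracked with an all-ones marker rather than a comparison against zero. A generic sketch of the idiom (structure and helper names are illustrative, not the driver's):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    #define EXAMPLE_DMA_ADDR_INVALID    (~(dma_addr_t)0)

    struct example_req {
        void *buf;
        size_t len;
        dma_addr_t dma;    /* EXAMPLE_DMA_ADDR_INVALID until mapped */
    };

    static int example_map(struct device *dev, struct example_req *req,
                           bool to_dev)
    {
        if (req->dma == EXAMPLE_DMA_ADDR_INVALID) {
            req->dma = dma_map_single(dev, req->buf, req->len,
                                      to_dev ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, req->dma)) {
                req->dma = EXAMPLE_DMA_ADDR_INVALID;
                return -ENOMEM;
            }
        }
        return 0;
    }
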
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 52583a235330..1a6f415c0d02 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -624,7 +624,8 @@ static int fsg_setup(struct usb_function *f,
624 if (ctrl->bRequestType != 624 if (ctrl->bRequestType !=
625 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 625 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
626 break; 626 break;
627 if (w_index != fsg->interface_number || w_value != 0) 627 if (w_index != fsg->interface_number || w_value != 0 ||
628 w_length != 0)
628 return -EDOM; 629 return -EDOM;
629 630
630 /* 631 /*
@@ -639,7 +640,8 @@ static int fsg_setup(struct usb_function *f,
639 if (ctrl->bRequestType != 640 if (ctrl->bRequestType !=
640 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 641 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
641 break; 642 break;
642 if (w_index != fsg->interface_number || w_value != 0) 643 if (w_index != fsg->interface_number || w_value != 0 ||
644 w_length != 1)
643 return -EDOM; 645 return -EDOM;
644 VDBG(fsg, "get max LUN\n"); 646 VDBG(fsg, "get max LUN\n");
645 *(u8 *)req->buf = fsg->common->nluns - 1; 647 *(u8 *)req->buf = fsg->common->nluns - 1;
@@ -2973,6 +2975,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2973 fsg_common_put(common); 2975 fsg_common_put(common);
2974 usb_free_descriptors(fsg->function.descriptors); 2976 usb_free_descriptors(fsg->function.descriptors);
2975 usb_free_descriptors(fsg->function.hs_descriptors); 2977 usb_free_descriptors(fsg->function.hs_descriptors);
2978 usb_free_descriptors(fsg->function.ss_descriptors);
2976 kfree(fsg); 2979 kfree(fsg);
2977} 2980}
2978 2981
diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
index 67b222908cf9..3797b3d6c622 100644
--- a/drivers/usb/gadget/f_midi.c
+++ b/drivers/usb/gadget/f_midi.c
@@ -95,7 +95,6 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req);
95 95
96DECLARE_UAC_AC_HEADER_DESCRIPTOR(1); 96DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
97DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1); 97DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
98DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(16);
99DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16); 98DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
100 99
101/* B.3.1 Standard AC Interface Descriptor */ 100/* B.3.1 Standard AC Interface Descriptor */
@@ -140,26 +139,6 @@ static struct usb_ms_header_descriptor ms_header_desc __initdata = {
140 /* .wTotalLength = DYNAMIC */ 139 /* .wTotalLength = DYNAMIC */
141}; 140};
142 141
143/* B.4.3 Embedded MIDI IN Jack Descriptor */
144static struct usb_midi_in_jack_descriptor jack_in_emb_desc = {
145 .bLength = USB_DT_MIDI_IN_SIZE,
146 .bDescriptorType = USB_DT_CS_INTERFACE,
147 .bDescriptorSubtype = USB_MS_MIDI_IN_JACK,
148 .bJackType = USB_MS_EMBEDDED,
149 /* .bJackID = DYNAMIC */
150};
151
152/* B.4.4 Embedded MIDI OUT Jack Descriptor */
153static struct usb_midi_out_jack_descriptor_16 jack_out_emb_desc = {
154 /* .bLength = DYNAMIC */
155 .bDescriptorType = USB_DT_CS_INTERFACE,
156 .bDescriptorSubtype = USB_MS_MIDI_OUT_JACK,
157 .bJackType = USB_MS_EMBEDDED,
158 /* .bJackID = DYNAMIC */
159 /* .bNrInputPins = DYNAMIC */
160 /* .pins = DYNAMIC */
161};
162
163/* B.5.1 Standard Bulk OUT Endpoint Descriptor */ 142/* B.5.1 Standard Bulk OUT Endpoint Descriptor */
164static struct usb_endpoint_descriptor bulk_out_desc = { 143static struct usb_endpoint_descriptor bulk_out_desc = {
165 .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, 144 .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
@@ -758,9 +737,11 @@ fail:
758static int __init 737static int __init
759f_midi_bind(struct usb_configuration *c, struct usb_function *f) 738f_midi_bind(struct usb_configuration *c, struct usb_function *f)
760{ 739{
761 struct usb_descriptor_header *midi_function[(MAX_PORTS * 2) + 12]; 740 struct usb_descriptor_header **midi_function;
762 struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS]; 741 struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
742 struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS];
763 struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS]; 743 struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
744 struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc[MAX_PORTS];
764 struct usb_composite_dev *cdev = c->cdev; 745 struct usb_composite_dev *cdev = c->cdev;
765 struct f_midi *midi = func_to_midi(f); 746 struct f_midi *midi = func_to_midi(f);
766 int status, n, jack = 1, i = 0; 747 int status, n, jack = 1, i = 0;
@@ -798,6 +779,14 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
798 goto fail; 779 goto fail;
799 midi->out_ep->driver_data = cdev; /* claim */ 780 midi->out_ep->driver_data = cdev; /* claim */
800 781
782 /* allocate temporary function list */
783 midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(midi_function),
784 GFP_KERNEL);
785 if (!midi_function) {
786 status = -ENOMEM;
787 goto fail;
788 }
789
801 /* 790 /*
802 * construct the function's descriptor set. As the number of 791 * construct the function's descriptor set. As the number of
803 * input and output MIDI ports is configurable, we have to do 792 * input and output MIDI ports is configurable, we have to do
@@ -811,73 +800,74 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
811 800
812 /* calculate the header's wTotalLength */ 801 /* calculate the header's wTotalLength */
813 n = USB_DT_MS_HEADER_SIZE 802 n = USB_DT_MS_HEADER_SIZE
814 + (1 + midi->in_ports) * USB_DT_MIDI_IN_SIZE 803 + (midi->in_ports + midi->out_ports) *
815 + (1 + midi->out_ports) * USB_DT_MIDI_OUT_SIZE(1); 804 (USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1));
816 ms_header_desc.wTotalLength = cpu_to_le16(n); 805 ms_header_desc.wTotalLength = cpu_to_le16(n);
817 806
818 midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc; 807 midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
819 808
820 /* we have one embedded IN jack */ 809 /* configure the external IN jacks, each linked to an embedded OUT jack */
821 jack_in_emb_desc.bJackID = jack++;
822 midi_function[i++] = (struct usb_descriptor_header *) &jack_in_emb_desc;
823
824 /* and a dynamic amount of external IN jacks */
825 for (n = 0; n < midi->in_ports; n++) {
826 struct usb_midi_in_jack_descriptor *ext = &jack_in_ext_desc[n];
827
828 ext->bLength = USB_DT_MIDI_IN_SIZE;
829 ext->bDescriptorType = USB_DT_CS_INTERFACE;
830 ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
831 ext->bJackType = USB_MS_EXTERNAL;
832 ext->bJackID = jack++;
833 ext->iJack = 0;
834
835 midi_function[i++] = (struct usb_descriptor_header *) ext;
836 }
837
838 /* one embedded OUT jack ... */
839 jack_out_emb_desc.bLength = USB_DT_MIDI_OUT_SIZE(midi->in_ports);
840 jack_out_emb_desc.bJackID = jack++;
841 jack_out_emb_desc.bNrInputPins = midi->in_ports;
842 /* ... which referencess all external IN jacks */
843 for (n = 0; n < midi->in_ports; n++) { 810 for (n = 0; n < midi->in_ports; n++) {
844 jack_out_emb_desc.pins[n].baSourceID = jack_in_ext_desc[n].bJackID; 811 struct usb_midi_in_jack_descriptor *in_ext = &jack_in_ext_desc[n];
845 jack_out_emb_desc.pins[n].baSourcePin = 1; 812 struct usb_midi_out_jack_descriptor_1 *out_emb = &jack_out_emb_desc[n];
813
814 in_ext->bLength = USB_DT_MIDI_IN_SIZE;
815 in_ext->bDescriptorType = USB_DT_CS_INTERFACE;
816 in_ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
817 in_ext->bJackType = USB_MS_EXTERNAL;
818 in_ext->bJackID = jack++;
819 in_ext->iJack = 0;
820 midi_function[i++] = (struct usb_descriptor_header *) in_ext;
821
822 out_emb->bLength = USB_DT_MIDI_OUT_SIZE(1);
823 out_emb->bDescriptorType = USB_DT_CS_INTERFACE;
824 out_emb->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
825 out_emb->bJackType = USB_MS_EMBEDDED;
826 out_emb->bJackID = jack++;
827 out_emb->bNrInputPins = 1;
828 out_emb->pins[0].baSourcePin = 1;
829 out_emb->pins[0].baSourceID = in_ext->bJackID;
830 out_emb->iJack = 0;
831 midi_function[i++] = (struct usb_descriptor_header *) out_emb;
832
833 /* link it to the endpoint */
834 ms_in_desc.baAssocJackID[n] = out_emb->bJackID;
846 } 835 }
847 836
848 midi_function[i++] = (struct usb_descriptor_header *) &jack_out_emb_desc; 837 /* configure the external OUT jacks, each linked to an embedded IN jack */
849
850 /* and multiple external OUT jacks ... */
851 for (n = 0; n < midi->out_ports; n++) { 838 for (n = 0; n < midi->out_ports; n++) {
852 struct usb_midi_out_jack_descriptor_1 *ext = &jack_out_ext_desc[n]; 839 struct usb_midi_in_jack_descriptor *in_emb = &jack_in_emb_desc[n];
853 int m; 840 struct usb_midi_out_jack_descriptor_1 *out_ext = &jack_out_ext_desc[n];
854 841
855 ext->bLength = USB_DT_MIDI_OUT_SIZE(1); 842 in_emb->bLength = USB_DT_MIDI_IN_SIZE;
856 ext->bDescriptorType = USB_DT_CS_INTERFACE; 843 in_emb->bDescriptorType = USB_DT_CS_INTERFACE;
857 ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK; 844 in_emb->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
858 ext->bJackType = USB_MS_EXTERNAL; 845 in_emb->bJackType = USB_MS_EMBEDDED;
859 ext->bJackID = jack++; 846 in_emb->bJackID = jack++;
860 ext->bNrInputPins = 1; 847 in_emb->iJack = 0;
861 ext->iJack = 0; 848 midi_function[i++] = (struct usb_descriptor_header *) in_emb;
862 /* ... which all reference the same embedded IN jack */ 849
863 for (m = 0; m < midi->out_ports; m++) { 850 out_ext->bLength = USB_DT_MIDI_OUT_SIZE(1);
864 ext->pins[m].baSourceID = jack_in_emb_desc.bJackID; 851 out_ext->bDescriptorType = USB_DT_CS_INTERFACE;
865 ext->pins[m].baSourcePin = 1; 852 out_ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
866 } 853 out_ext->bJackType = USB_MS_EXTERNAL;
867 854 out_ext->bJackID = jack++;
868 midi_function[i++] = (struct usb_descriptor_header *) ext; 855 out_ext->bNrInputPins = 1;
856 out_ext->iJack = 0;
857 out_ext->pins[0].baSourceID = in_emb->bJackID;
858 out_ext->pins[0].baSourcePin = 1;
859 midi_function[i++] = (struct usb_descriptor_header *) out_ext;
860
861 /* link it to the endpoint */
862 ms_out_desc.baAssocJackID[n] = in_emb->bJackID;
869 } 863 }
870 864
871 /* configure the endpoint descriptors ... */ 865 /* configure the endpoint descriptors ... */
872 ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports); 866 ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
873 ms_out_desc.bNumEmbMIDIJack = midi->in_ports; 867 ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
874 for (n = 0; n < midi->in_ports; n++)
875 ms_out_desc.baAssocJackID[n] = jack_in_emb_desc.bJackID;
876 868
877 ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports); 869 ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
878 ms_in_desc.bNumEmbMIDIJack = midi->out_ports; 870 ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
879 for (n = 0; n < midi->out_ports; n++)
880 ms_in_desc.baAssocJackID[n] = jack_out_emb_desc.bJackID;
881 871
882 /* ... and add them to the list */ 872 /* ... and add them to the list */
883 midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc; 873 midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc;
@@ -901,6 +891,8 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
901 f->descriptors = usb_copy_descriptors(midi_function); 891 f->descriptors = usb_copy_descriptors(midi_function);
902 } 892 }
903 893
894 kfree(midi_function);
895
904 return 0; 896 return 0;
905 897
906fail: 898fail:
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 349077033338..16a509ae517b 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -346,7 +346,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
346 } 346 }
347 347
348 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 348 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
349 skb->len == 0, req->actual); 349 skb->len <= 1, req->actual);
350 page = NULL; 350 page = NULL;
351 351
352 if (req->actual < req->length) { /* Last fragment */ 352 if (req->actual < req->length) { /* Last fragment */
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 91fdf790ed20..cf33a8d0fd5d 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -131,8 +131,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
131 } 131 }
132 if (!gser->port.in->desc || !gser->port.out->desc) { 132 if (!gser->port.in->desc || !gser->port.out->desc) {
133 DBG(cdev, "activate generic ttyGS%d\n", gser->port_num); 133 DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
134 if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) || 134 if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
135 !config_ep_by_speed(cdev->gadget, f, gser->port.out)) { 135 config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
136 gser->port.in->desc = NULL; 136 gser->port.in->desc = NULL;
137 gser->port.out->desc = NULL; 137 gser->port.out->desc = NULL;
138 return -EINVAL; 138 return -EINVAL;
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index f7e39b0365ce..11b5196284ae 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -859,7 +859,7 @@ static int class_setup_req(struct fsg_dev *fsg,
859 if (ctrl->bRequestType != (USB_DIR_OUT | 859 if (ctrl->bRequestType != (USB_DIR_OUT |
860 USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 860 USB_TYPE_CLASS | USB_RECIP_INTERFACE))
861 break; 861 break;
862 if (w_index != 0 || w_value != 0) { 862 if (w_index != 0 || w_value != 0 || w_length != 0) {
863 value = -EDOM; 863 value = -EDOM;
864 break; 864 break;
865 } 865 }
@@ -875,7 +875,7 @@ static int class_setup_req(struct fsg_dev *fsg,
875 if (ctrl->bRequestType != (USB_DIR_IN | 875 if (ctrl->bRequestType != (USB_DIR_IN |
876 USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 876 USB_TYPE_CLASS | USB_RECIP_INTERFACE))
877 break; 877 break;
878 if (w_index != 0 || w_value != 0) { 878 if (w_index != 0 || w_value != 0 || w_length != 1) {
879 value = -EDOM; 879 value = -EDOM;
880 break; 880 break;
881 } 881 }
diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index 43a49ecc1f36..dcbc0a2e48dd 100644
--- a/drivers/usb/gadget/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/fsl_devices.h> 17#include <linux/fsl_devices.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/io.h>
19 20
20#include <mach/hardware.h> 21#include <mach/hardware.h>
21 22
@@ -88,7 +89,6 @@ eenahb:
88void fsl_udc_clk_finalize(struct platform_device *pdev) 89void fsl_udc_clk_finalize(struct platform_device *pdev)
89{ 90{
90 struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; 91 struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
91#if defined(CONFIG_SOC_IMX35)
92 if (cpu_is_mx35()) { 92 if (cpu_is_mx35()) {
93 unsigned int v; 93 unsigned int v;
94 94
@@ -101,7 +101,6 @@ void fsl_udc_clk_finalize(struct platform_device *pdev)
101 USBPHYCTRL_OTGBASE_OFFSET)); 101 USBPHYCTRL_OTGBASE_OFFSET));
102 } 102 }
103 } 103 }
104#endif
105 104
106 /* ULPI transceivers don't need usbpll */ 105 /* ULPI transceivers don't need usbpll */
107 if (pdata->phy_mode == FSL_USB2_PHY_ULPI) { 106 if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 2a03e4de11c1..e00cf92409ce 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2336,8 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
2336 if (!udc_controller) 2336 if (!udc_controller)
2337 return -ENODEV; 2337 return -ENODEV;
2338 2338
2339 if (!driver || (driver->speed != USB_SPEED_FULL 2339 if (!driver || driver->speed < USB_SPEED_FULL
2340 && driver->speed != USB_SPEED_HIGH)
2341 || !bind || !driver->disconnect || !driver->setup) 2340 || !bind || !driver->disconnect || !driver->setup)
2342 return -EINVAL; 2341 return -EINVAL;
2343 2342
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index b2c44e1d5813..dd28ef3def71 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -696,12 +696,31 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
696 kfree(req); 696 kfree(req);
697} 697}
698 698
699/*-------------------------------------------------------------------------*/ 699/* Actually add a dTD chain to an empty dQH and let go */
700static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
701{
702 struct ep_queue_head *qh = get_qh_by_ep(ep);
703
704 /* Write dQH next pointer and terminate bit to 0 */
705 qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
706 & EP_QUEUE_HEAD_NEXT_POINTER_MASK);
707
708 /* Clear active and halt bit */
709 qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
710 | EP_QUEUE_HEAD_STATUS_HALT));
711
712 /* Ensure that updates to the QH will occur before priming. */
713 wmb();
714
715 /* Prime endpoint by writing correct bit to ENDPTPRIME */
716 fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
717 : (1 << (ep_index(ep))), &dr_regs->endpointprime);
718}
719
720/* Add dTD chain to the dQH of an EP */
700static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req) 721static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
701{ 722{
702 int i = ep_index(ep) * 2 + ep_is_in(ep);
703 u32 temp, bitmask, tmp_stat; 723 u32 temp, bitmask, tmp_stat;
704 struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
705 724
706 /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr); 725 /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
707 VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */ 726 VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
@@ -719,7 +738,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
719 cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK); 738 cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
720 /* Read prime bit, if 1 goto done */ 739 /* Read prime bit, if 1 goto done */
721 if (fsl_readl(&dr_regs->endpointprime) & bitmask) 740 if (fsl_readl(&dr_regs->endpointprime) & bitmask)
722 goto out; 741 return;
723 742
724 do { 743 do {
725 /* Set ATDTW bit in USBCMD */ 744 /* Set ATDTW bit in USBCMD */
@@ -736,28 +755,10 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
736 fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd); 755 fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
737 756
738 if (tmp_stat) 757 if (tmp_stat)
739 goto out; 758 return;
740 } 759 }
741 760
742 /* Write dQH next pointer and terminate bit to 0 */ 761 fsl_prime_ep(ep, req->head);
743 temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
744 dQH->next_dtd_ptr = cpu_to_hc32(temp);
745
746 /* Clear active and halt bit */
747 temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
748 | EP_QUEUE_HEAD_STATUS_HALT));
749 dQH->size_ioc_int_sts &= temp;
750
751 /* Ensure that updates to the QH will occur before priming. */
752 wmb();
753
754 /* Prime endpoint by writing 1 to ENDPTPRIME */
755 temp = ep_is_in(ep)
756 ? (1 << (ep_index(ep) + 16))
757 : (1 << (ep_index(ep)));
758 fsl_writel(temp, &dr_regs->endpointprime);
759out:
760 return;
761} 762}
762 763
763/* Fill in the dTD structure 764/* Fill in the dTD structure
@@ -877,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
877 VDBG("%s, bad ep", __func__); 878 VDBG("%s, bad ep", __func__);
878 return -EINVAL; 879 return -EINVAL;
879 } 880 }
880 if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { 881 if (usb_endpoint_xfer_isoc(ep->desc)) {
881 if (req->req.length > ep->ep.maxpacket) 882 if (req->req.length > ep->ep.maxpacket)
882 return -EMSGSIZE; 883 return -EMSGSIZE;
883 } 884 }
@@ -973,25 +974,20 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
973 974
974 /* The request isn't the last request in this ep queue */ 975 /* The request isn't the last request in this ep queue */
975 if (req->queue.next != &ep->queue) { 976 if (req->queue.next != &ep->queue) {
976 struct ep_queue_head *qh;
977 struct fsl_req *next_req; 977 struct fsl_req *next_req;
978 978
979 qh = ep->qh;
980 next_req = list_entry(req->queue.next, struct fsl_req, 979 next_req = list_entry(req->queue.next, struct fsl_req,
981 queue); 980 queue);
982 981
983 /* Point the QH to the first TD of next request */ 982 /* prime with dTD of next request */
984 fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr); 983 fsl_prime_ep(ep, next_req->head);
985 } 984 }
986 985 /* The request hasn't been processed, patch up the TD chain */
987 /* The request hasn't been processed, patch up the TD chain */
988 } else { 986 } else {
989 struct fsl_req *prev_req; 987 struct fsl_req *prev_req;
990 988
991 prev_req = list_entry(req->queue.prev, struct fsl_req, queue); 989 prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
992 fsl_writel(fsl_readl(&req->tail->next_td_ptr), 990 prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
993 &prev_req->tail->next_td_ptr);
994
995 } 991 }
996 992
997 done(ep, req, -ECONNRESET); 993 done(ep, req, -ECONNRESET);
@@ -1032,7 +1028,7 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
1032 goto out; 1028 goto out;
1033 } 1029 }
1034 1030
1035 if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { 1031 if (usb_endpoint_xfer_isoc(ep->desc)) {
1036 status = -EOPNOTSUPP; 1032 status = -EOPNOTSUPP;
1037 goto out; 1033 goto out;
1038 } 1034 }
@@ -1068,7 +1064,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
1068 struct fsl_udc *udc; 1064 struct fsl_udc *udc;
1069 int size = 0; 1065 int size = 0;
1070 u32 bitmask; 1066 u32 bitmask;
1071 struct ep_queue_head *d_qh; 1067 struct ep_queue_head *qh;
1072 1068
1073 ep = container_of(_ep, struct fsl_ep, ep); 1069 ep = container_of(_ep, struct fsl_ep, ep);
1074 if (!_ep || (!ep->desc && ep_index(ep) != 0)) 1070 if (!_ep || (!ep->desc && ep_index(ep) != 0))
@@ -1079,13 +1075,13 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
1079 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) 1075 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
1080 return -ESHUTDOWN; 1076 return -ESHUTDOWN;
1081 1077
1082 d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)]; 1078 qh = get_qh_by_ep(ep);
1083 1079
1084 bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) : 1080 bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
1085 (1 << (ep_index(ep))); 1081 (1 << (ep_index(ep)));
1086 1082
1087 if (fsl_readl(&dr_regs->endptstatus) & bitmask) 1083 if (fsl_readl(&dr_regs->endptstatus) & bitmask)
1088 size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE) 1084 size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
1089 >> DTD_LENGTH_BIT_POS; 1085 >> DTD_LENGTH_BIT_POS;
1090 1086
1091 pr_debug("%s %u\n", __func__, size); 1087 pr_debug("%s %u\n", __func__, size);
@@ -1717,7 +1713,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
1717 1713
1718static inline enum usb_device_speed portscx_device_speed(u32 reg) 1714static inline enum usb_device_speed portscx_device_speed(u32 reg)
1719{ 1715{
1720 switch (speed & PORTSCX_PORT_SPEED_MASK) { 1716 switch (reg & PORTSCX_PORT_SPEED_MASK) {
1721 case PORTSCX_PORT_SPEED_HIGH: 1717 case PORTSCX_PORT_SPEED_HIGH:
1722 return USB_SPEED_HIGH; 1718 return USB_SPEED_HIGH;
1723 case PORTSCX_PORT_SPEED_FULL: 1719 case PORTSCX_PORT_SPEED_FULL:
@@ -1938,8 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver,
1938 if (!udc_controller) 1934 if (!udc_controller)
1939 return -ENODEV; 1935 return -ENODEV;
1940 1936
1941 if (!driver || (driver->speed != USB_SPEED_FULL 1937 if (!driver || driver->speed < USB_SPEED_FULL
1942 && driver->speed != USB_SPEED_HIGH)
1943 || !bind || !driver->disconnect || !driver->setup) 1938 || !bind || !driver->disconnect || !driver->setup)
1944 return -EINVAL; 1939 return -EINVAL;
1945 1940
@@ -2480,8 +2475,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
2480 2475
2481#ifndef CONFIG_ARCH_MXC 2476#ifndef CONFIG_ARCH_MXC
2482 if (pdata->have_sysif_regs) 2477 if (pdata->have_sysif_regs)
2483 usb_sys_regs = (struct usb_sys_interface *) 2478 usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET;
2484 ((u32)dr_regs + USB_DR_SYS_OFFSET);
2485#endif 2479#endif
2486 2480
2487 /* Initialize USB clocks */ 2481 /* Initialize USB clocks */
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 1d51be83fda8..f781f5dec417 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -569,6 +569,16 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
569 * 2 + ((windex & USB_DIR_IN) ? 1 : 0)) 569 * 2 + ((windex & USB_DIR_IN) ? 1 : 0))
570#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP)) 570#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
571 571
572static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
573{
574 /* we only have one ep0 structure but two queue heads */
575 if (ep_index(ep) != 0)
576 return ep->qh;
577 else
578 return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
579 USB_DIR_IN) ? 1 : 0];
580}
581
572struct platform_device; 582struct platform_device;
573#ifdef CONFIG_ARCH_MXC 583#ifdef CONFIG_ARCH_MXC
574int fsl_udc_clk_init(struct platform_device *pdev); 584int fsl_udc_clk_init(struct platform_device *pdev);
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index a392ec0d2d51..6ccae2707e59 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1730,8 +1730,9 @@ static void
1730gadgetfs_disconnect (struct usb_gadget *gadget) 1730gadgetfs_disconnect (struct usb_gadget *gadget)
1731{ 1731{
1732 struct dev_data *dev = get_gadget_data (gadget); 1732 struct dev_data *dev = get_gadget_data (gadget);
1733 unsigned long flags;
1733 1734
1734 spin_lock (&dev->lock); 1735 spin_lock_irqsave (&dev->lock, flags);
1735 if (dev->state == STATE_DEV_UNCONNECTED) 1736 if (dev->state == STATE_DEV_UNCONNECTED)
1736 goto exit; 1737 goto exit;
1737 dev->state = STATE_DEV_UNCONNECTED; 1738 dev->state = STATE_DEV_UNCONNECTED;
@@ -1740,7 +1741,7 @@ gadgetfs_disconnect (struct usb_gadget *gadget)
1740 next_event (dev, GADGETFS_DISCONNECT); 1741 next_event (dev, GADGETFS_DISCONNECT);
1741 ep0_readable (dev); 1742 ep0_readable (dev);
1742exit: 1743exit:
1743 spin_unlock (&dev->lock); 1744 spin_unlock_irqrestore (&dev->lock, flags);
1744} 1745}
1745 1746
1746static void 1747static void
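The gadgetfs change above swaps plain spin_lock() for spin_lock_irqsave() in the disconnect path; the usual motivation for this pattern is that the same lock may also be taken from interrupt context, so the caller's interrupt state has to be saved and restored rather than assumed. A generic sketch of the pattern (names are hypothetical):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);
    static int example_state;

    /* Safe to call whether or not interrupts are already disabled. */
    static void example_disconnect(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        example_state = 0;
        spin_unlock_irqrestore(&example_lock, flags);
    }
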
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 91d0af2a24a8..9aa1cbbee45b 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver,
1472 int retval; 1472 int retval;
1473 1473
1474 if (!driver 1474 if (!driver
1475 || driver->speed != USB_SPEED_HIGH 1475 || driver->speed < USB_SPEED_HIGH
1476 || !bind 1476 || !bind
1477 || !driver->setup) 1477 || !driver->setup)
1478 return -EINVAL; 1478 return -EINVAL;
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 7f1bc9a73cda..da2b9d0be3ca 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget,
1881 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE) 1881 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
1882 * "must not be used in normal operation" 1882 * "must not be used in normal operation"
1883 */ 1883 */
1884 if (!driver || driver->speed != USB_SPEED_HIGH 1884 if (!driver || driver->speed < USB_SPEED_HIGH
1885 || !driver->setup) 1885 || !driver->setup)
1886 return -EINVAL; 1886 return -EINVAL;
1887 1887
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 550d6dcdf104..5048a0c07640 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. 2 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
@@ -354,6 +354,7 @@ struct pch_udc_dev {
354#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808 354#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
355#define PCI_VENDOR_ID_ROHM 0x10DB 355#define PCI_VENDOR_ID_ROHM 0x10DB
356#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D 356#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
357#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
357 358
358static const char ep0_string[] = "ep0in"; 359static const char ep0_string[] = "ep0in";
359static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */ 360static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
@@ -2970,6 +2971,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
2970 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, 2971 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2971 .class_mask = 0xffffffff, 2972 .class_mask = 0xffffffff,
2972 }, 2973 },
2974 {
2975 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
2976 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2977 .class_mask = 0xffffffff,
2978 },
2973 { 0 }, 2979 { 0 },
2974}; 2980};
2975 2981
@@ -2999,5 +3005,5 @@ static void __exit pch_udc_pci_exit(void)
2999module_exit(pch_udc_pci_exit); 3005module_exit(pch_udc_pci_exit);
3000 3006
3001MODULE_DESCRIPTION("Intel EG20T USB Device Controller"); 3007MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3002MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>"); 3008MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3003MODULE_LICENSE("GPL"); 3009MODULE_LICENSE("GPL");
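The pch_udc hunk adds the ML7831 device ID both as a #define and as an extra entry in the PCI ID table, so the existing probe path binds to the new part without further changes. A hedged sketch of how such a table entry is typically expressed; the IDs below are placeholders, not real hardware:

#include <linux/module.h>
#include <linux/pci.h>

#define DEMO_PCI_VENDOR_ID	0x10db	/* placeholder vendor */
#define DEMO_PCI_DEVICE_ID	0x8808	/* placeholder device */

static const struct pci_device_id demo_pci_ids[] = {
	{
		PCI_DEVICE(DEMO_PCI_VENDOR_ID, DEMO_PCI_DEVICE_ID),
		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask = 0xffffffff,
	},
	{ 0 },	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, demo_pci_ids);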
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 68a826a1b866..fc719a3f8557 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1718,6 +1718,8 @@ static void r8a66597_fifo_flush(struct usb_ep *_ep)
1718 if (list_empty(&ep->queue) && !ep->busy) { 1718 if (list_empty(&ep->queue) && !ep->busy) {
1719 pipe_stop(ep->r8a66597, ep->pipenum); 1719 pipe_stop(ep->r8a66597, ep->pipenum);
1720 r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr); 1720 r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
1721 r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
1722 r8a66597_write(ep->r8a66597, 0, ep->pipectr);
1721 } 1723 }
1722 spin_unlock_irqrestore(&ep->r8a66597->lock, flags); 1724 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1723} 1725}
@@ -1742,26 +1744,16 @@ static int r8a66597_start(struct usb_gadget *gadget,
1742 struct usb_gadget_driver *driver) 1744 struct usb_gadget_driver *driver)
1743{ 1745{
1744 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget); 1746 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1745 int retval;
1746 1747
1747 if (!driver 1748 if (!driver
1748 || driver->speed != USB_SPEED_HIGH 1749 || driver->speed < USB_SPEED_HIGH
1749 || !driver->setup) 1750 || !driver->setup)
1750 return -EINVAL; 1751 return -EINVAL;
1751 if (!r8a66597) 1752 if (!r8a66597)
1752 return -ENODEV; 1753 return -ENODEV;
1753 1754
1754 /* hook up the driver */ 1755 /* hook up the driver */
1755 driver->driver.bus = NULL;
1756 r8a66597->driver = driver; 1756 r8a66597->driver = driver;
1757 r8a66597->gadget.dev.driver = &driver->driver;
1758
1759 retval = device_add(&r8a66597->gadget.dev);
1760 if (retval) {
1761 dev_err(r8a66597_to_dev(r8a66597), "device_add error (%d)\n",
1762 retval);
1763 goto error;
1764 }
1765 1757
1766 init_controller(r8a66597); 1758 init_controller(r8a66597);
1767 r8a66597_bset(r8a66597, VBSE, INTENB0); 1759 r8a66597_bset(r8a66597, VBSE, INTENB0);
@@ -1775,12 +1767,6 @@ static int r8a66597_start(struct usb_gadget *gadget,
1775 } 1767 }
1776 1768
1777 return 0; 1769 return 0;
1778
1779error:
1780 r8a66597->driver = NULL;
1781 r8a66597->gadget.dev.driver = NULL;
1782
1783 return retval;
1784} 1770}
1785 1771
1786static int r8a66597_stop(struct usb_gadget *gadget, 1772static int r8a66597_stop(struct usb_gadget *gadget,
@@ -1794,7 +1780,6 @@ static int r8a66597_stop(struct usb_gadget *gadget,
1794 disable_controller(r8a66597); 1780 disable_controller(r8a66597);
1795 spin_unlock_irqrestore(&r8a66597->lock, flags); 1781 spin_unlock_irqrestore(&r8a66597->lock, flags);
1796 1782
1797 device_del(&r8a66597->gadget.dev);
1798 r8a66597->driver = NULL; 1783 r8a66597->driver = NULL;
1799 return 0; 1784 return 0;
1800} 1785}
@@ -1845,6 +1830,7 @@ static int __exit r8a66597_remove(struct platform_device *pdev)
1845 clk_put(r8a66597->clk); 1830 clk_put(r8a66597->clk);
1846 } 1831 }
1847#endif 1832#endif
1833 device_unregister(&r8a66597->gadget.dev);
1848 kfree(r8a66597); 1834 kfree(r8a66597);
1849 return 0; 1835 return 0;
1850} 1836}
@@ -1924,13 +1910,17 @@ static int __init r8a66597_probe(struct platform_device *pdev)
1924 r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW; 1910 r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
1925 1911
1926 r8a66597->gadget.ops = &r8a66597_gadget_ops; 1912 r8a66597->gadget.ops = &r8a66597_gadget_ops;
1927 device_initialize(&r8a66597->gadget.dev);
1928 dev_set_name(&r8a66597->gadget.dev, "gadget"); 1913 dev_set_name(&r8a66597->gadget.dev, "gadget");
1929 r8a66597->gadget.is_dualspeed = 1; 1914 r8a66597->gadget.is_dualspeed = 1;
1930 r8a66597->gadget.dev.parent = &pdev->dev; 1915 r8a66597->gadget.dev.parent = &pdev->dev;
1931 r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask; 1916 r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
1932 r8a66597->gadget.dev.release = pdev->dev.release; 1917 r8a66597->gadget.dev.release = pdev->dev.release;
1933 r8a66597->gadget.name = udc_name; 1918 r8a66597->gadget.name = udc_name;
1919 ret = device_register(&r8a66597->gadget.dev);
1920 if (ret < 0) {
1921 dev_err(&pdev->dev, "device_register failed\n");
1922 goto clean_up;
1923 }
1934 1924
1935 init_timer(&r8a66597->timer); 1925 init_timer(&r8a66597->timer);
1936 r8a66597->timer.function = r8a66597_timer; 1926 r8a66597->timer.function = r8a66597_timer;
@@ -1945,7 +1935,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
1945 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", 1935 dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
1946 clk_name); 1936 clk_name);
1947 ret = PTR_ERR(r8a66597->clk); 1937 ret = PTR_ERR(r8a66597->clk);
1948 goto clean_up; 1938 goto clean_up_dev;
1949 } 1939 }
1950 clk_enable(r8a66597->clk); 1940 clk_enable(r8a66597->clk);
1951 } 1941 }
@@ -2014,7 +2004,9 @@ clean_up2:
2014 clk_disable(r8a66597->clk); 2004 clk_disable(r8a66597->clk);
2015 clk_put(r8a66597->clk); 2005 clk_put(r8a66597->clk);
2016 } 2006 }
2007clean_up_dev:
2017#endif 2008#endif
2009 device_unregister(&r8a66597->gadget.dev);
2018clean_up: 2010clean_up:
2019 if (r8a66597) { 2011 if (r8a66597) {
2020 if (r8a66597->sudmac_reg) 2012 if (r8a66597->sudmac_reg)
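In the r8a66597 hunks the gadget's struct device is now registered once in probe with device_register() and torn down with device_unregister() in remove and on the probe error path, instead of device_initialize() in probe plus device_add()/device_del() around every start/stop. A minimal sketch of that lifecycle, assuming a hypothetical driver-private structure (a real driver also supplies a .release callback):

#include <linux/device.h>

struct demo_udc {
	struct device gadget_dev;
};

static int demo_probe(struct demo_udc *udc, struct device *parent)
{
	int ret;

	dev_set_name(&udc->gadget_dev, "gadget");
	udc->gadget_dev.parent = parent;

	/* device_register() = device_initialize() + device_add(). */
	ret = device_register(&udc->gadget_dev);
	if (ret < 0)
		return ret;
	return 0;
}

static void demo_remove(struct demo_udc *udc)
{
	/* Undoes device_register() and drops the reference. */
	device_unregister(&udc->gadget_dev);
}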
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index a552453dc946..b31448229f0b 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2586,10 +2586,8 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver,
2586 return -EINVAL; 2586 return -EINVAL;
2587 } 2587 }
2588 2588
2589 if (driver->speed != USB_SPEED_HIGH && 2589 if (driver->speed < USB_SPEED_FULL)
2590 driver->speed != USB_SPEED_FULL) {
2591 dev_err(hsotg->dev, "%s: bad speed\n", __func__); 2590 dev_err(hsotg->dev, "%s: bad speed\n", __func__);
2592 }
2593 2591
2594 if (!bind || !driver->setup) { 2592 if (!bind || !driver->setup) {
2595 dev_err(hsotg->dev, "%s: missing entry points\n", __func__); 2593 dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 8d54f893cefe..20a553b46aed 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -1142,8 +1142,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
1142 int ret; 1142 int ret;
1143 1143
1144 if (!driver 1144 if (!driver
1145 || (driver->speed != USB_SPEED_FULL && 1145 || driver->speed < USB_SPEED_FULL
1146 driver->speed != USB_SPEED_HIGH)
1147 || !bind 1146 || !bind
1148 || !driver->unbind || !driver->disconnect || !driver->setup) 1147 || !driver->unbind || !driver->disconnect || !driver->setup)
1149 return -EINVAL; 1148 return -EINVAL;
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 022baeca7c94..6939e17f4580 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -210,10 +210,10 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
210 kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); 210 kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
211 211
212 if (udc_is_newstyle(udc)) { 212 if (udc_is_newstyle(udc)) {
213 usb_gadget_disconnect(udc->gadget); 213 udc->driver->disconnect(udc->gadget);
214 udc->driver->unbind(udc->gadget); 214 udc->driver->unbind(udc->gadget);
215 usb_gadget_udc_stop(udc->gadget, udc->driver); 215 usb_gadget_udc_stop(udc->gadget, udc->driver);
216 216 usb_gadget_disconnect(udc->gadget);
217 } else { 217 } else {
218 usb_gadget_stop(udc->gadget, udc->driver); 218 usb_gadget_stop(udc->gadget, udc->driver);
219 } 219 }
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
344static ssize_t usb_udc_srp_store(struct device *dev, 344static ssize_t usb_udc_srp_store(struct device *dev,
345 struct device_attribute *attr, const char *buf, size_t n) 345 struct device_attribute *attr, const char *buf, size_t n)
346{ 346{
347 struct usb_udc *udc = dev_get_drvdata(dev); 347 struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
348 348
349 if (sysfs_streq(buf, "1")) 349 if (sysfs_streq(buf, "1"))
350 usb_gadget_wakeup(udc->gadget); 350 usb_gadget_wakeup(udc->gadget);
@@ -378,7 +378,7 @@ static ssize_t usb_udc_speed_show(struct device *dev,
378 return snprintf(buf, PAGE_SIZE, "%s\n", 378 return snprintf(buf, PAGE_SIZE, "%s\n",
379 usb_speed_string(udc->gadget->speed)); 379 usb_speed_string(udc->gadget->speed));
380} 380}
381static DEVICE_ATTR(speed, S_IRUSR, usb_udc_speed_show, NULL); 381static DEVICE_ATTR(speed, S_IRUGO, usb_udc_speed_show, NULL);
382 382
383#define USB_UDC_ATTR(name) \ 383#define USB_UDC_ATTR(name) \
384ssize_t usb_udc_##name##_show(struct device *dev, \ 384ssize_t usb_udc_##name##_show(struct device *dev, \
@@ -389,7 +389,7 @@ ssize_t usb_udc_##name##_show(struct device *dev, \
389 \ 389 \
390 return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \ 390 return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \
391} \ 391} \
392static DEVICE_ATTR(name, S_IRUSR, usb_udc_##name##_show, NULL) 392static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL)
393 393
394static USB_UDC_ATTR(is_dualspeed); 394static USB_UDC_ATTR(is_dualspeed);
395static USB_UDC_ATTR(is_otg); 395static USB_UDC_ATTR(is_otg);
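The udc-core fix replaces dev_get_drvdata() with container_of() in usb_udc_srp_store(): struct usb_udc embeds its struct device, so the attribute callback can recover the containing object directly instead of relying on drvdata that was never set, and the read-only attributes are widened from S_IRUSR to S_IRUGO. A sketch of the container_of() pattern with hypothetical types:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

struct demo_udc {
	struct device dev;	/* embedded, not a pointer */
	int is_otg;
};

static ssize_t demo_is_otg_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	/* Walk back from the embedded member to the containing struct. */
	struct demo_udc *udc = container_of(dev, struct demo_udc, dev);

	return sprintf(buf, "%d\n", udc->is_otg);
}
static DEVICE_ATTR(is_otg, S_IRUGO, demo_is_otg_show, NULL);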
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 2e829fae6482..a60679cbbf85 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1475,30 +1475,36 @@ iso_stream_schedule (
1475 * jump until after the queue is primed. 1475 * jump until after the queue is primed.
1476 */ 1476 */
1477 else { 1477 else {
1478 int done = 0;
1478 start = SCHEDULE_SLOP + (now & ~0x07); 1479 start = SCHEDULE_SLOP + (now & ~0x07);
1479 1480
1480 /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ 1481 /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
1481 1482
1482 /* find a uframe slot with enough bandwidth */ 1483 /* find a uframe slot with enough bandwidth.
1483 next = start + period; 1484 * Early uframes are more precious because full-speed
1484 for (; start < next; start++) { 1485 * iso IN transfers can't use late uframes,
1485 1486 * and therefore they should be allocated last.
1487 */
1488 next = start;
1489 start += period;
1490 do {
1491 start--;
1486 /* check schedule: enough space? */ 1492 /* check schedule: enough space? */
1487 if (stream->highspeed) { 1493 if (stream->highspeed) {
1488 if (itd_slot_ok(ehci, mod, start, 1494 if (itd_slot_ok(ehci, mod, start,
1489 stream->usecs, period)) 1495 stream->usecs, period))
1490 break; 1496 done = 1;
1491 } else { 1497 } else {
1492 if ((start % 8) >= 6) 1498 if ((start % 8) >= 6)
1493 continue; 1499 continue;
1494 if (sitd_slot_ok(ehci, mod, stream, 1500 if (sitd_slot_ok(ehci, mod, stream,
1495 start, sched, period)) 1501 start, sched, period))
1496 break; 1502 done = 1;
1497 } 1503 }
1498 } 1504 } while (start > next && !done);
1499 1505
1500 /* no room in the schedule */ 1506 /* no room in the schedule */
1501 if (start == next) { 1507 if (!done) {
1502 ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n", 1508 ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
1503 urb, now, now + mod); 1509 urb, now, now + mod);
1504 status = -ENOSPC; 1510 status = -ENOSPC;
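The ehci-sched rewrite keeps the same candidate window but walks it from the latest uframe back towards next, setting a done flag when a slot fits, so the earliest (most constrained) uframes are only consumed as a last resort. A generic sketch of that control-flow shape, with a hypothetical fits() predicate standing in for the itd/sitd bandwidth checks:

/* Scan [next, next + period) from the top down; fits() is hypothetical. */
static int demo_pick_slot(unsigned next, unsigned period,
			  int (*fits)(unsigned slot))
{
	unsigned start = next + period;
	int done = 0;

	do {
		start--;
		if (fits(start))
			done = 1;
	} while (start > next && !done);

	return done ? (int)start : -1;	/* -1: no room in the window */
}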
diff --git a/drivers/usb/host/ehci-xls.c b/drivers/usb/host/ehci-xls.c
index fe74bd676018..b4fb511d24bc 100644
--- a/drivers/usb/host/ehci-xls.c
+++ b/drivers/usb/host/ehci-xls.c
@@ -19,7 +19,7 @@ static int ehci_xls_setup(struct usb_hcd *hcd)
19 19
20 ehci->caps = hcd->regs; 20 ehci->caps = hcd->regs;
21 ehci->regs = hcd->regs + 21 ehci->regs = hcd->regs +
22 HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); 22 HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
23 dbg_hcs_params(ehci, "reset"); 23 dbg_hcs_params(ehci, "reset");
24 dbg_hcc_params(ehci, "reset"); 24 dbg_hcc_params(ehci, "reset");
25 25
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index ba3a46b78b75..95a9fec38e89 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -223,6 +223,9 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
223 if (port < 0 || port >= 2) 223 if (port < 0 || port >= 2)
224 return; 224 return;
225 225
226 if (pdata->vbus_pin[port] <= 0)
227 return;
228
226 gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable); 229 gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable);
227} 230}
228 231
@@ -231,6 +234,9 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
231 if (port < 0 || port >= 2) 234 if (port < 0 || port >= 2)
232 return -EINVAL; 235 return -EINVAL;
233 236
237 if (pdata->vbus_pin[port] <= 0)
238 return -EINVAL;
239
234 return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted; 240 return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted;
235} 241}
236 242
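The ohci-at91 hunks bail out of the power get/set helpers when no VBUS GPIO is wired up for the port (pdata->vbus_pin[port] <= 0), so gpio_set_value()/gpio_get_value() are never called with an invalid GPIO number. A sketch of the guard, with a hypothetical platform-data layout:

#include <linux/gpio.h>

struct demo_pdata {
	int vbus_pin[2];	/* <= 0 means "no VBUS GPIO for this port" */
	int vbus_pin_inverted;
};

static void demo_set_power(struct demo_pdata *pdata, int port, int enable)
{
	if (port < 0 || port >= 2)
		return;
	if (pdata->vbus_pin[port] <= 0)	/* board has no VBUS control here */
		return;

	gpio_set_value(pdata->vbus_pin[port],
		       !pdata->vbus_pin_inverted ^ enable);
}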
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 34efd479e068..b2639191549e 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -389,17 +389,14 @@ ohci_shutdown (struct usb_hcd *hcd)
389 struct ohci_hcd *ohci; 389 struct ohci_hcd *ohci;
390 390
391 ohci = hcd_to_ohci (hcd); 391 ohci = hcd_to_ohci (hcd);
392 ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); 392 ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);
393 ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
394 393
395 /* If the SHUTDOWN quirk is set, don't put the controller in RESET */ 394 /* Software reset, after which the controller goes into SUSPEND */
396 ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ? 395 ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
397 OHCI_CTRL_RWC | OHCI_CTRL_HCFS : 396 ohci_readl(ohci, &ohci->regs->cmdstatus); /* flush the writes */
398 OHCI_CTRL_RWC); 397 udelay(10);
399 ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
400 398
401 /* flush the writes */ 399 ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
402 (void) ohci_readl (ohci, &ohci->regs->control);
403} 400}
404 401
405static int check_ed(struct ohci_hcd *ohci, struct ed *ed) 402static int check_ed(struct ohci_hcd *ohci, struct ed *ed)
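ohci_shutdown() now masks every interrupt source and issues a host-controller software reset, which parks the controller in SUSPEND, then restores HcFmInterval, instead of carrying the nVidia-specific OHCI_QUIRK_SHUTDOWN path that the later hunks delete. A hedged sketch of the write/flush/delay shape of such an MMIO reset; the register offsets below are placeholders, not the driver's real layout:

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>

/* Placeholder offsets; the real ones live in the OHCI driver headers. */
#define DEMO_INTRDISABLE	0x14
#define DEMO_CMDSTATUS		0x08
#define DEMO_HCR		(1 << 0)	/* host controller reset */

static void demo_shutdown(void __iomem *base)
{
	/* Mask all interrupt sources. */
	writel((u32) ~0, base + DEMO_INTRDISABLE);

	/* Request a software reset and flush the posted write. */
	writel(DEMO_HCR, base + DEMO_CMDSTATUS);
	readl(base + DEMO_CMDSTATUS);

	/* The OHCI spec allows the reset up to 10 us to complete. */
	udelay(10);
}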
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index ad8166c681e2..bc01b064585a 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -175,28 +175,6 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
175 return 0; 175 return 0;
176} 176}
177 177
178/* nVidia controllers continue to drive Reset signalling on the bus
179 * even after system shutdown, wasting power. This flag tells the
180 * shutdown routine to leave the controller OPERATIONAL instead of RESET.
181 */
182static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
183{
184 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
185 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
186
187 /* Evidently nVidia fixed their later hardware; this is a guess at
188 * the changeover point.
189 */
190#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d
191
192 if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) {
193 ohci->flags |= OHCI_QUIRK_SHUTDOWN;
194 ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
195 }
196
197 return 0;
198}
199
200static void sb800_prefetch(struct ohci_hcd *ohci, int on) 178static void sb800_prefetch(struct ohci_hcd *ohci, int on)
201{ 179{
202 struct pci_dev *pdev; 180 struct pci_dev *pdev;
@@ -260,10 +238,6 @@ static const struct pci_device_id ohci_pci_quirks[] = {
260 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), 238 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
261 .driver_data = (unsigned long)ohci_quirk_amd700, 239 .driver_data = (unsigned long)ohci_quirk_amd700,
262 }, 240 },
263 {
264 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
265 .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown,
266 },
267 241
268 /* FIXME for some of the early AMD 760 southbridges, OHCI 242 /* FIXME for some of the early AMD 760 southbridges, OHCI
269 * won't work at all. blacklist them. 243 * won't work at all. blacklist them.
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 35e5fd640ce7..0795b934d00c 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -403,7 +403,6 @@ struct ohci_hcd {
403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ 403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
404#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/ 404#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
405#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ 405#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
406#define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */
407 // there are also chip quirks/bugs in init logic 406 // there are also chip quirks/bugs in init logic
408 407
409 struct work_struct nec_work; /* Worker for NEC quirk */ 408 struct work_struct nec_work; /* Worker for NEC quirk */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 27a3dec32fa2..caf87428ca43 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -37,6 +37,7 @@
37#define OHCI_INTRENABLE 0x10 37#define OHCI_INTRENABLE 0x10
38#define OHCI_INTRDISABLE 0x14 38#define OHCI_INTRDISABLE 0x14
39#define OHCI_FMINTERVAL 0x34 39#define OHCI_FMINTERVAL 0x34
40#define OHCI_HCFS (3 << 6) /* hc functional state */
40#define OHCI_HCR (1 << 0) /* host controller reset */ 41#define OHCI_HCR (1 << 0) /* host controller reset */
41#define OHCI_OCR (1 << 3) /* ownership change request */ 42#define OHCI_OCR (1 << 3) /* ownership change request */
42#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */ 43#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
@@ -466,6 +467,8 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
466{ 467{
467 void __iomem *base; 468 void __iomem *base;
468 u32 control; 469 u32 control;
470 u32 fminterval;
471 int cnt;
469 472
470 if (!mmio_resource_enabled(pdev, 0)) 473 if (!mmio_resource_enabled(pdev, 0))
471 return; 474 return;
@@ -498,41 +501,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
498 } 501 }
499#endif 502#endif
500 503
501 /* reset controller, preserving RWC (and possibly IR) */ 504 /* disable interrupts */
502 writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL); 505 writel((u32) ~0, base + OHCI_INTRDISABLE);
503 readl(base + OHCI_CONTROL);
504 506
505 /* Some NVIDIA controllers stop working if kept in RESET for too long */ 507 /* Reset the USB bus, if the controller isn't already in RESET */
506 if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) { 508 if (control & OHCI_HCFS) {
507 u32 fminterval; 509 /* Go into RESET, preserving RWC (and possibly IR) */
508 int cnt; 510 writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
511 readl(base + OHCI_CONTROL);
509 512
510 /* drive reset for at least 50 ms (7.1.7.5) */ 513 /* drive bus reset for at least 50 ms (7.1.7.5) */
511 msleep(50); 514 msleep(50);
515 }
512 516
513 /* software reset of the controller, preserving HcFmInterval */ 517 /* software reset of the controller, preserving HcFmInterval */
514 fminterval = readl(base + OHCI_FMINTERVAL); 518 fminterval = readl(base + OHCI_FMINTERVAL);
515 writel(OHCI_HCR, base + OHCI_CMDSTATUS); 519 writel(OHCI_HCR, base + OHCI_CMDSTATUS);
516 520
517 /* reset requires max 10 us delay */ 521 /* reset requires max 10 us delay */
518 for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */ 522 for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */
519 if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0) 523 if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
520 break; 524 break;
521 udelay(1); 525 udelay(1);
522 }
523 writel(fminterval, base + OHCI_FMINTERVAL);
524
525 /* Now we're in the SUSPEND state with all devices reset
526 * and wakeups and interrupts disabled
527 */
528 } 526 }
527 writel(fminterval, base + OHCI_FMINTERVAL);
529 528
530 /* 529 /* Now the controller is safely in SUSPEND and nothing can wake it up */
531 * disable interrupts
532 */
533 writel(~(u32)0, base + OHCI_INTRDISABLE);
534 writel(~(u32)0, base + OHCI_INTRSTATUS);
535
536 iounmap(base); 530 iounmap(base);
537} 531}
538 532
@@ -627,7 +621,7 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
627 void __iomem *base, *op_reg_base; 621 void __iomem *base, *op_reg_base;
628 u32 hcc_params, cap, val; 622 u32 hcc_params, cap, val;
629 u8 offset, cap_length; 623 u8 offset, cap_length;
630 int wait_time, delta, count = 256/4; 624 int wait_time, count = 256/4;
631 625
632 if (!mmio_resource_enabled(pdev, 0)) 626 if (!mmio_resource_enabled(pdev, 0))
633 return; 627 return;
@@ -673,11 +667,10 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
673 writel(val, op_reg_base + EHCI_USBCMD); 667 writel(val, op_reg_base + EHCI_USBCMD);
674 668
675 wait_time = 2000; 669 wait_time = 2000;
676 delta = 100;
677 do { 670 do {
678 writel(0x3f, op_reg_base + EHCI_USBSTS); 671 writel(0x3f, op_reg_base + EHCI_USBSTS);
679 udelay(delta); 672 udelay(100);
680 wait_time -= delta; 673 wait_time -= 100;
681 val = readl(op_reg_base + EHCI_USBSTS); 674 val = readl(op_reg_base + EHCI_USBSTS);
682 if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) { 675 if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
683 break; 676 break;
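The pci-quirks handoff now resets the controller the same way for every vendor: disable interrupts, drive a bus reset only if the controller was not already in the RESET functional state, then poll the self-clearing HCR bit with a bounded loop before restoring HcFmInterval; the EHCI path likewise drops the delta variable for a fixed 100 us step. A generic sketch of a bounded poll on a self-clearing bit, with hypothetical names:

#include <linux/io.h>
#include <linux/delay.h>

/* Wait (up to ~30 us) for a self-clearing reset bit; layout is hypothetical. */
static int demo_wait_reset_done(void __iomem *cmdstatus, u32 reset_bit)
{
	int cnt;

	for (cnt = 30; cnt > 0; --cnt) {
		if (!(readl(cmdstatus) & reset_bit))
			return 0;	/* bit cleared: reset finished */
		udelay(1);
	}
	return -1;	/* timed out; -ETIMEDOUT in real code */
}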
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index d6e175428618..a403b53e86b9 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
124{ 124{
125 qset->td_start = qset->td_end = qset->ntds = 0; 125 qset->td_start = qset->td_end = qset->ntds = 0;
126 126
127 qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T); 127 qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
128 qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK; 128 qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
129 qset->qh.err_count = 0; 129 qset->qh.err_count = 0;
130 qset->qh.scratch[0] = 0; 130 qset->qh.scratch[0] = 0;
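The WHCI fix switches the qset link initialisation from cpu_to_le32() to cpu_to_le64(): qh.link is a 64-bit little-endian field, and converting only 32 bits would produce the wrong byte layout on big-endian hosts. A small sketch of matching the conversion helper to the field width, with a hypothetical hardware structure:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_qhead {
	__le64 link;		/* hardware-visible, little-endian, 64-bit */
	__le32 status;
};

static void demo_qhead_init(struct demo_qhead *qh, u64 link, u32 status)
{
	qh->link = cpu_to_le64(link);	/* helper width matches field width */
	qh->status = cpu_to_le32(status);
}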
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 42a22b8e6922..0e4b25fa3bcd 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -982,7 +982,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
982 struct xhci_virt_device *dev; 982 struct xhci_virt_device *dev;
983 struct xhci_ep_ctx *ep0_ctx; 983 struct xhci_ep_ctx *ep0_ctx;
984 struct xhci_slot_ctx *slot_ctx; 984 struct xhci_slot_ctx *slot_ctx;
985 struct xhci_input_control_ctx *ctrl_ctx;
986 u32 port_num; 985 u32 port_num;
987 struct usb_device *top_dev; 986 struct usb_device *top_dev;
988 987
@@ -994,12 +993,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
994 return -EINVAL; 993 return -EINVAL;
995 } 994 }
996 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); 995 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
997 ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
998 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); 996 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
999 997
1000 /* 2) New slot context and endpoint 0 context are valid*/
1001 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
1002
1003 /* 3) Only the control endpoint is valid - one endpoint context */ 998 /* 3) Only the control endpoint is valid - one endpoint context */
1004 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); 999 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
1005 switch (udev->speed) { 1000 switch (udev->speed) {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 940321b3ec68..9f1d4b15d818 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -816,23 +816,24 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
816 struct xhci_ring *ring; 816 struct xhci_ring *ring;
817 struct xhci_td *cur_td; 817 struct xhci_td *cur_td;
818 int ret, i, j; 818 int ret, i, j;
819 unsigned long flags;
819 820
820 ep = (struct xhci_virt_ep *) arg; 821 ep = (struct xhci_virt_ep *) arg;
821 xhci = ep->xhci; 822 xhci = ep->xhci;
822 823
823 spin_lock(&xhci->lock); 824 spin_lock_irqsave(&xhci->lock, flags);
824 825
825 ep->stop_cmds_pending--; 826 ep->stop_cmds_pending--;
826 if (xhci->xhc_state & XHCI_STATE_DYING) { 827 if (xhci->xhc_state & XHCI_STATE_DYING) {
827 xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " 828 xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
828 "xHCI as DYING, exiting.\n"); 829 "xHCI as DYING, exiting.\n");
829 spin_unlock(&xhci->lock); 830 spin_unlock_irqrestore(&xhci->lock, flags);
830 return; 831 return;
831 } 832 }
832 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { 833 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
833 xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " 834 xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
834 "exiting.\n"); 835 "exiting.\n");
835 spin_unlock(&xhci->lock); 836 spin_unlock_irqrestore(&xhci->lock, flags);
836 return; 837 return;
837 } 838 }
838 839
@@ -844,11 +845,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
844 xhci->xhc_state |= XHCI_STATE_DYING; 845 xhci->xhc_state |= XHCI_STATE_DYING;
845 /* Disable interrupts from the host controller and start halting it */ 846 /* Disable interrupts from the host controller and start halting it */
846 xhci_quiesce(xhci); 847 xhci_quiesce(xhci);
847 spin_unlock(&xhci->lock); 848 spin_unlock_irqrestore(&xhci->lock, flags);
848 849
849 ret = xhci_halt(xhci); 850 ret = xhci_halt(xhci);
850 851
851 spin_lock(&xhci->lock); 852 spin_lock_irqsave(&xhci->lock, flags);
852 if (ret < 0) { 853 if (ret < 0) {
853 /* This is bad; the host is not responding to commands and it's 854 /* This is bad; the host is not responding to commands and it's
854 * not allowing itself to be halted. At least interrupts are 855 * not allowing itself to be halted. At least interrupts are
@@ -896,7 +897,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
896 } 897 }
897 } 898 }
898 } 899 }
899 spin_unlock(&xhci->lock); 900 spin_unlock_irqrestore(&xhci->lock, flags);
900 xhci_dbg(xhci, "Calling usb_hc_died()\n"); 901 xhci_dbg(xhci, "Calling usb_hc_died()\n");
901 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); 902 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
902 xhci_dbg(xhci, "xHCI host controller is dead.\n"); 903 xhci_dbg(xhci, "xHCI host controller is dead.\n");
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1ff95a0df576..a1afb7c39f7e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -711,7 +711,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
711 ring = xhci->cmd_ring; 711 ring = xhci->cmd_ring;
712 seg = ring->deq_seg; 712 seg = ring->deq_seg;
713 do { 713 do {
714 memset(seg->trbs, 0, SEGMENT_SIZE); 714 memset(seg->trbs, 0,
715 sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
716 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
717 cpu_to_le32(~TRB_CYCLE);
715 seg = seg->next; 718 seg = seg->next;
716 } while (seg != ring->deq_seg); 719 } while (seg != ring->deq_seg);
717 720
@@ -799,7 +802,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
799 u32 command, temp = 0; 802 u32 command, temp = 0;
800 struct usb_hcd *hcd = xhci_to_hcd(xhci); 803 struct usb_hcd *hcd = xhci_to_hcd(xhci);
801 struct usb_hcd *secondary_hcd; 804 struct usb_hcd *secondary_hcd;
802 int retval; 805 int retval = 0;
803 806
804 /* Wait a bit if either of the roothubs need to settle from the 807 /* Wait a bit if either of the roothubs need to settle from the
805 * transition into bus suspend. 808 * transition into bus suspend.
@@ -809,6 +812,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
809 xhci->bus_state[1].next_statechange)) 812 xhci->bus_state[1].next_statechange))
810 msleep(100); 813 msleep(100);
811 814
815 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
816 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
817
812 spin_lock_irq(&xhci->lock); 818 spin_lock_irq(&xhci->lock);
813 if (xhci->quirks & XHCI_RESET_ON_RESUME) 819 if (xhci->quirks & XHCI_RESET_ON_RESUME)
814 hibernated = true; 820 hibernated = true;
@@ -878,20 +884,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
878 return retval; 884 return retval;
879 xhci_dbg(xhci, "Start the primary HCD\n"); 885 xhci_dbg(xhci, "Start the primary HCD\n");
880 retval = xhci_run(hcd->primary_hcd); 886 retval = xhci_run(hcd->primary_hcd);
881 if (retval)
882 goto failed_restart;
883
884 xhci_dbg(xhci, "Start the secondary HCD\n");
885 retval = xhci_run(secondary_hcd);
886 if (!retval) { 887 if (!retval) {
887 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 888 xhci_dbg(xhci, "Start the secondary HCD\n");
888 set_bit(HCD_FLAG_HW_ACCESSIBLE, 889 retval = xhci_run(secondary_hcd);
889 &xhci->shared_hcd->flags);
890 } 890 }
891failed_restart:
892 hcd->state = HC_STATE_SUSPENDED; 891 hcd->state = HC_STATE_SUSPENDED;
893 xhci->shared_hcd->state = HC_STATE_SUSPENDED; 892 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
894 return retval; 893 goto done;
895 } 894 }
896 895
897 /* step 4: set Run/Stop bit */ 896 /* step 4: set Run/Stop bit */
@@ -910,11 +909,14 @@ failed_restart:
910 * Running endpoints by ringing their doorbells 909 * Running endpoints by ringing their doorbells
911 */ 910 */
912 911
913 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
914 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
915
916 spin_unlock_irq(&xhci->lock); 912 spin_unlock_irq(&xhci->lock);
917 return 0; 913
914 done:
915 if (retval == 0) {
916 usb_hcd_resume_root_hub(hcd);
917 usb_hcd_resume_root_hub(xhci->shared_hcd);
918 }
919 return retval;
918} 920}
919#endif /* CONFIG_PM */ 921#endif /* CONFIG_PM */
920 922
@@ -3504,6 +3506,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3504 /* Otherwise, update the control endpoint ring enqueue pointer. */ 3506 /* Otherwise, update the control endpoint ring enqueue pointer. */
3505 else 3507 else
3506 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 3508 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3509 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3510 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3511 ctrl_ctx->drop_flags = 0;
3512
3507 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 3513 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3508 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 3514 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3509 3515
@@ -3585,7 +3591,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3585 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) 3591 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
3586 + 1; 3592 + 1;
3587 /* Zero the input context control for later use */ 3593 /* Zero the input context control for later use */
3588 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3589 ctrl_ctx->add_flags = 0; 3594 ctrl_ctx->add_flags = 0;
3590 ctrl_ctx->drop_flags = 0; 3595 ctrl_ctx->drop_flags = 0;
3591 3596
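Among the xhci.c changes, xhci_clear_command_ring() now zeroes only the first TRBS_PER_SEGMENT - 1 TRBs of each segment and clears just the cycle bit of the trailing link TRB, so the ring's segment chaining survives the wipe. A sketch of "wipe the payload, preserve the link entry", assuming a hypothetical segment layout rather than the xHCI structures:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

#define DEMO_TRBS_PER_SEG	16
#define DEMO_TRB_CYCLE		(1 << 0)

struct demo_trb {
	__le32 field[4];	/* field[3] holds the control bits */
};

static void demo_clear_segment(struct demo_trb *trbs)
{
	/* Wipe every TRB except the trailing link TRB... */
	memset(trbs, 0, sizeof(struct demo_trb) * (DEMO_TRBS_PER_SEG - 1));

	/* ...and on the link TRB clear only the cycle bit, keeping the
	 * pointer to the next segment intact. */
	trbs[DEMO_TRBS_PER_SEG - 1].field[3] &= cpu_to_le32(~DEMO_TRB_CYCLE);
}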
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index fc34b8b11910..07a03460a598 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -11,6 +11,7 @@ config USB_MUSB_HDRC
11 select TWL4030_USB if MACH_OMAP_3430SDP 11 select TWL4030_USB if MACH_OMAP_3430SDP
12 select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA 12 select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
13 select USB_OTG_UTILS 13 select USB_OTG_UTILS
14 select USB_GADGET_DUALSPEED
14 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' 15 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
15 help 16 help
16 Say Y here if your system has a dual role high speed USB 17 Say Y here if your system has a dual role high speed USB
@@ -60,7 +61,7 @@ config USB_MUSB_BLACKFIN
60 61
61config USB_MUSB_UX500 62config USB_MUSB_UX500
62 tristate "U8500 and U5500" 63 tristate "U8500 and U5500"
63 depends on (ARCH_U8500 && AB8500_USB) || (ARCH_U5500) 64 depends on (ARCH_U8500 && AB8500_USB)
64 65
65endchoice 66endchoice
66 67
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index 08f1d0b662a3..e233d2b7d335 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -27,6 +27,7 @@
27 */ 27 */
28 28
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/module.h>
30#include <linux/clk.h> 31#include <linux/clk.h>
31#include <linux/io.h> 32#include <linux/io.h>
32#include <linux/platform_device.h> 33#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 4da7492ddbdb..2613bfdb09b6 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -27,6 +27,7 @@
27 */ 27 */
28 28
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/module.h>
30#include <linux/clk.h> 31#include <linux/clk.h>
31#include <linux/io.h> 32#include <linux/io.h>
32#include <linux/platform_device.h> 33#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 20a28731c338..b63ab1570103 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1477,8 +1477,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1477/*-------------------------------------------------------------------------*/ 1477/*-------------------------------------------------------------------------*/
1478 1478
1479#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \ 1479#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \
1480 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \ 1480 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500)
1481 defined(CONFIG_ARCH_U5500)
1482 1481
1483static irqreturn_t generic_interrupt(int irq, void *__hci) 1482static irqreturn_t generic_interrupt(int irq, void *__hci)
1484{ 1483{
@@ -2302,18 +2301,12 @@ static int musb_suspend(struct device *dev)
2302 */ 2301 */
2303 } 2302 }
2304 2303
2305 musb_save_context(musb);
2306
2307 spin_unlock_irqrestore(&musb->lock, flags); 2304 spin_unlock_irqrestore(&musb->lock, flags);
2308 return 0; 2305 return 0;
2309} 2306}
2310 2307
2311static int musb_resume_noirq(struct device *dev) 2308static int musb_resume_noirq(struct device *dev)
2312{ 2309{
2313 struct musb *musb = dev_to_musb(dev);
2314
2315 musb_restore_context(musb);
2316
2317 /* for static cmos like DaVinci, register values were preserved 2310 /* for static cmos like DaVinci, register values were preserved
2318 * unless for some reason the whole soc powered down or the USB 2311 * unless for some reason the whole soc powered down or the USB
2319 * module got reset through the PSC (vs just being disabled). 2312 * module got reset through the PSC (vs just being disabled).
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ae4a20acef6c..922148ff8d29 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1903,7 +1903,7 @@ static int musb_gadget_start(struct usb_gadget *g,
1903 unsigned long flags; 1903 unsigned long flags;
1904 int retval = -EINVAL; 1904 int retval = -EINVAL;
1905 1905
1906 if (driver->speed != USB_SPEED_HIGH) 1906 if (driver->speed < USB_SPEED_HIGH)
1907 goto err0; 1907 goto err0;
1908 1908
1909 pm_runtime_get_sync(musb->controller); 1909 pm_runtime_get_sync(musb->controller);
@@ -1999,10 +1999,6 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1999 nuke(&hw_ep->ep_out, -ESHUTDOWN); 1999 nuke(&hw_ep->ep_out, -ESHUTDOWN);
2000 } 2000 }
2001 } 2001 }
2002
2003 spin_unlock(&musb->lock);
2004 driver->disconnect(&musb->g);
2005 spin_lock(&musb->lock);
2006 } 2002 }
2007} 2003}
2008 2004
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index d2e2efaba658..08c679c0dde5 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -405,7 +405,7 @@ int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
405/* 405/*
406 * platform functions 406 * platform functions
407 */ 407 */
408static int __devinit usbhs_probe(struct platform_device *pdev) 408static int usbhs_probe(struct platform_device *pdev)
409{ 409{
410 struct renesas_usbhs_platform_info *info = pdev->dev.platform_data; 410 struct renesas_usbhs_platform_info *info = pdev->dev.platform_data;
411 struct renesas_usbhs_driver_callback *dfunc; 411 struct renesas_usbhs_driver_callback *dfunc;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 8da685e796d1..ffdf5d15085e 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -820,7 +820,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
820 if (len % 4) /* 32bit alignment */ 820 if (len % 4) /* 32bit alignment */
821 goto usbhsf_pio_prepare_push; 821 goto usbhsf_pio_prepare_push;
822 822
823 if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ 823 if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
824 goto usbhsf_pio_prepare_push; 824 goto usbhsf_pio_prepare_push;
825 825
826 /* get enable DMA fifo */ 826 /* get enable DMA fifo */
@@ -897,7 +897,7 @@ static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
897 if (!fifo) 897 if (!fifo)
898 goto usbhsf_pio_prepare_pop; 898 goto usbhsf_pio_prepare_pop;
899 899
900 if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ 900 if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
901 goto usbhsf_pio_prepare_pop; 901 goto usbhsf_pio_prepare_pop;
902 902
903 ret = usbhsf_fifo_select(pipe, fifo, 0); 903 ret = usbhsf_fifo_select(pipe, fifo, 0);
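The renesas_usbhs FIFO hunks fix the DMA eligibility test: the old code dereferenced the buffer (*(u32 *) pkt->buf + pkt->actual), which tests data rather than the address, while the new code casts the address itself to uintptr_t before masking. A standalone sketch of an 8-byte alignment check:

#include <linux/types.h>

/* True if (buf + offset) sits on an 8-byte boundary. */
static bool demo_is_8byte_aligned(const void *buf, size_t offset)
{
	return (((uintptr_t)buf + offset) & 0x7) == 0;
}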
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 053f86d70009..ad96a3896729 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -349,7 +349,7 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
349 if (mod->irq_attch) 349 if (mod->irq_attch)
350 intenb1 |= ATTCHE; 350 intenb1 |= ATTCHE;
351 351
352 if (mod->irq_attch) 352 if (mod->irq_dtch)
353 intenb1 |= DTCHE; 353 intenb1 |= DTCHE;
354 354
355 if (mod->irq_sign) 355 if (mod->irq_sign)
diff --git a/drivers/usb/renesas_usbhs/mod.h b/drivers/usb/renesas_usbhs/mod.h
index 8ae3733031cd..6c6875533f01 100644
--- a/drivers/usb/renesas_usbhs/mod.h
+++ b/drivers/usb/renesas_usbhs/mod.h
@@ -143,8 +143,8 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod);
143 */ 143 */
144#if defined(CONFIG_USB_RENESAS_USBHS_HCD) || \ 144#if defined(CONFIG_USB_RENESAS_USBHS_HCD) || \
145 defined(CONFIG_USB_RENESAS_USBHS_HCD_MODULE) 145 defined(CONFIG_USB_RENESAS_USBHS_HCD_MODULE)
146extern int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv); 146extern int usbhs_mod_host_probe(struct usbhs_priv *priv);
147extern int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv); 147extern int usbhs_mod_host_remove(struct usbhs_priv *priv);
148#else 148#else
149static inline int usbhs_mod_host_probe(struct usbhs_priv *priv) 149static inline int usbhs_mod_host_probe(struct usbhs_priv *priv)
150{ 150{
@@ -157,8 +157,8 @@ static inline void usbhs_mod_host_remove(struct usbhs_priv *priv)
157 157
158#if defined(CONFIG_USB_RENESAS_USBHS_UDC) || \ 158#if defined(CONFIG_USB_RENESAS_USBHS_UDC) || \
159 defined(CONFIG_USB_RENESAS_USBHS_UDC_MODULE) 159 defined(CONFIG_USB_RENESAS_USBHS_UDC_MODULE)
160extern int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv); 160extern int usbhs_mod_gadget_probe(struct usbhs_priv *priv);
161extern void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv); 161extern void usbhs_mod_gadget_remove(struct usbhs_priv *priv);
162#else 162#else
163static inline int usbhs_mod_gadget_probe(struct usbhs_priv *priv) 163static inline int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
164{ 164{
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 4cc7ee0babc6..7f4e80338570 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -751,53 +751,32 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
751 struct usb_gadget_driver *driver) 751 struct usb_gadget_driver *driver)
752{ 752{
753 struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); 753 struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
754 struct usbhs_priv *priv; 754 struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
755 struct device *dev;
756 int ret;
757 755
758 if (!driver || 756 if (!driver ||
759 !driver->setup || 757 !driver->setup ||
760 driver->speed != USB_SPEED_HIGH) 758 driver->speed < USB_SPEED_FULL)
761 return -EINVAL; 759 return -EINVAL;
762 760
763 dev = usbhsg_gpriv_to_dev(gpriv);
764 priv = usbhsg_gpriv_to_priv(gpriv);
765
766 /* first hook up the driver ... */ 761 /* first hook up the driver ... */
767 gpriv->driver = driver; 762 gpriv->driver = driver;
768 gpriv->gadget.dev.driver = &driver->driver; 763 gpriv->gadget.dev.driver = &driver->driver;
769 764
770 ret = device_add(&gpriv->gadget.dev);
771 if (ret) {
772 dev_err(dev, "device_add error %d\n", ret);
773 goto add_fail;
774 }
775
776 return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD); 765 return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
777
778add_fail:
779 gpriv->driver = NULL;
780 gpriv->gadget.dev.driver = NULL;
781
782 return ret;
783} 766}
784 767
785static int usbhsg_gadget_stop(struct usb_gadget *gadget, 768static int usbhsg_gadget_stop(struct usb_gadget *gadget,
786 struct usb_gadget_driver *driver) 769 struct usb_gadget_driver *driver)
787{ 770{
788 struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); 771 struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
789 struct usbhs_priv *priv; 772 struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
790 struct device *dev;
791 773
792 if (!driver || 774 if (!driver ||
793 !driver->unbind) 775 !driver->unbind)
794 return -EINVAL; 776 return -EINVAL;
795 777
796 dev = usbhsg_gpriv_to_dev(gpriv);
797 priv = usbhsg_gpriv_to_priv(gpriv);
798
799 usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD); 778 usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
800 device_del(&gpriv->gadget.dev); 779 gpriv->gadget.dev.driver = NULL;
801 gpriv->driver = NULL; 780 gpriv->driver = NULL;
802 781
803 return 0; 782 return 0;
@@ -827,10 +806,17 @@ static int usbhsg_start(struct usbhs_priv *priv)
827 806
828static int usbhsg_stop(struct usbhs_priv *priv) 807static int usbhsg_stop(struct usbhs_priv *priv)
829{ 808{
809 struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
810
811 /* cable disconnect */
812 if (gpriv->driver &&
813 gpriv->driver->disconnect)
814 gpriv->driver->disconnect(&gpriv->gadget);
815
830 return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED); 816 return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
831} 817}
832 818
833int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv) 819int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
834{ 820{
835 struct usbhsg_gpriv *gpriv; 821 struct usbhsg_gpriv *gpriv;
836 struct usbhsg_uep *uep; 822 struct usbhsg_uep *uep;
@@ -876,12 +862,14 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
876 /* 862 /*
877 * init gadget 863 * init gadget
878 */ 864 */
879 device_initialize(&gpriv->gadget.dev);
880 dev_set_name(&gpriv->gadget.dev, "gadget"); 865 dev_set_name(&gpriv->gadget.dev, "gadget");
881 gpriv->gadget.dev.parent = dev; 866 gpriv->gadget.dev.parent = dev;
882 gpriv->gadget.name = "renesas_usbhs_udc"; 867 gpriv->gadget.name = "renesas_usbhs_udc";
883 gpriv->gadget.ops = &usbhsg_gadget_ops; 868 gpriv->gadget.ops = &usbhsg_gadget_ops;
884 gpriv->gadget.is_dualspeed = 1; 869 gpriv->gadget.is_dualspeed = 1;
870 ret = device_register(&gpriv->gadget.dev);
871 if (ret < 0)
872 goto err_add_udc;
885 873
886 INIT_LIST_HEAD(&gpriv->gadget.ep_list); 874 INIT_LIST_HEAD(&gpriv->gadget.ep_list);
887 875
@@ -912,12 +900,15 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
912 900
913 ret = usb_add_gadget_udc(dev, &gpriv->gadget); 901 ret = usb_add_gadget_udc(dev, &gpriv->gadget);
914 if (ret) 902 if (ret)
915 goto err_add_udc; 903 goto err_register;
916 904
917 905
918 dev_info(dev, "gadget probed\n"); 906 dev_info(dev, "gadget probed\n");
919 907
920 return 0; 908 return 0;
909
910err_register:
911 device_unregister(&gpriv->gadget.dev);
921err_add_udc: 912err_add_udc:
922 kfree(gpriv->uep); 913 kfree(gpriv->uep);
923 914
@@ -927,12 +918,14 @@ usbhs_mod_gadget_probe_err_gpriv:
927 return ret; 918 return ret;
928} 919}
929 920
930void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv) 921void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
931{ 922{
932 struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); 923 struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
933 924
934 usb_del_gadget_udc(&gpriv->gadget); 925 usb_del_gadget_udc(&gpriv->gadget);
935 926
927 device_unregister(&gpriv->gadget.dev);
928
936 usbhsg_controller_unregister(gpriv); 929 usbhsg_controller_unregister(gpriv);
937 930
938 kfree(gpriv->uep); 931 kfree(gpriv->uep);
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 1a7208a50afc..7955de589951 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -103,7 +103,7 @@ struct usbhsh_hpriv {
103 103
104 u32 port_stat; /* USB_PORT_STAT_xxx */ 104 u32 port_stat; /* USB_PORT_STAT_xxx */
105 105
106 struct completion *done; 106 struct completion setup_ack_done;
107 107
108 /* see usbhsh_req_alloc/free */ 108 /* see usbhsh_req_alloc/free */
109 struct list_head ureq_link_active; 109 struct list_head ureq_link_active;
@@ -355,6 +355,7 @@ static void usbhsh_device_free(struct usbhsh_hpriv *hpriv,
355struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, 355struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
356 struct usbhsh_device *udev, 356 struct usbhsh_device *udev,
357 struct usb_host_endpoint *ep, 357 struct usb_host_endpoint *ep,
358 int dir_in_req,
358 gfp_t mem_flags) 359 gfp_t mem_flags)
359{ 360{
360 struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv); 361 struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
@@ -364,27 +365,38 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
364 struct usbhs_pipe *pipe, *best_pipe; 365 struct usbhs_pipe *pipe, *best_pipe;
365 struct device *dev = usbhsh_hcd_to_dev(hcd); 366 struct device *dev = usbhsh_hcd_to_dev(hcd);
366 struct usb_endpoint_descriptor *desc = &ep->desc; 367 struct usb_endpoint_descriptor *desc = &ep->desc;
367 int type, i; 368 int type, i, dir_in;
368 unsigned int min_usr; 369 unsigned int min_usr;
369 370
371 dir_in_req = !!dir_in_req;
372
370 uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags); 373 uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags);
371 if (!uep) { 374 if (!uep) {
372 dev_err(dev, "usbhsh_ep alloc fail\n"); 375 dev_err(dev, "usbhsh_ep alloc fail\n");
373 return NULL; 376 return NULL;
374 } 377 }
375 type = usb_endpoint_type(desc); 378
379 if (usb_endpoint_xfer_control(desc)) {
380 best_pipe = usbhsh_hpriv_to_dcp(hpriv);
381 goto usbhsh_endpoint_alloc_find_pipe;
382 }
376 383
377 /* 384 /*
378 * find best pipe for endpoint 385 * find best pipe for endpoint
379 * see 386 * see
380 * HARDWARE LIMITATION 387 * HARDWARE LIMITATION
381 */ 388 */
389 type = usb_endpoint_type(desc);
382 min_usr = ~0; 390 min_usr = ~0;
383 best_pipe = NULL; 391 best_pipe = NULL;
384 usbhs_for_each_pipe_with_dcp(pipe, priv, i) { 392 usbhs_for_each_pipe(pipe, priv, i) {
385 if (!usbhs_pipe_type_is(pipe, type)) 393 if (!usbhs_pipe_type_is(pipe, type))
386 continue; 394 continue;
387 395
396 dir_in = !!usbhs_pipe_is_dir_in(pipe);
397 if (0 != (dir_in - dir_in_req))
398 continue;
399
388 info = usbhsh_pipe_info(pipe); 400 info = usbhsh_pipe_info(pipe);
389 401
390 if (min_usr > info->usr_cnt) { 402 if (min_usr > info->usr_cnt) {
@@ -398,7 +410,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
398 kfree(uep); 410 kfree(uep);
399 return NULL; 411 return NULL;
400 } 412 }
401 413usbhsh_endpoint_alloc_find_pipe:
402 /* 414 /*
403 * init uep 415 * init uep
404 */ 416 */
@@ -423,6 +435,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
423 * see 435 * see
424 * DCPMAXP/PIPEMAXP 436 * DCPMAXP/PIPEMAXP
425 */ 437 */
438 usbhs_pipe_sequence_data0(uep->pipe);
426 usbhs_pipe_config_update(uep->pipe, 439 usbhs_pipe_config_update(uep->pipe,
427 usbhsh_device_number(hpriv, udev), 440 usbhsh_device_number(hpriv, udev),
428 usb_endpoint_num(desc), 441 usb_endpoint_num(desc),
@@ -430,7 +443,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
430 443
431 dev_dbg(dev, "%s [%d-%s](%p)\n", __func__, 444 dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
432 usbhsh_device_number(hpriv, udev), 445 usbhsh_device_number(hpriv, udev),
433 usbhs_pipe_name(pipe), uep); 446 usbhs_pipe_name(uep->pipe), uep);
434 447
435 return uep; 448 return uep;
436} 449}
@@ -549,8 +562,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
549 * usbhsh_irq_setup_ack() 562 * usbhsh_irq_setup_ack()
550 * usbhsh_irq_setup_err() 563 * usbhsh_irq_setup_err()
551 */ 564 */
552 DECLARE_COMPLETION(done); 565 init_completion(&hpriv->setup_ack_done);
553 hpriv->done = &done;
554 566
555 /* copy original request */ 567 /* copy original request */
556 memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest)); 568 memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest));
@@ -572,8 +584,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
572 /* 584 /*
573 * wait setup packet ACK 585 * wait setup packet ACK
574 */ 586 */
575 wait_for_completion(&done); 587 wait_for_completion(&hpriv->setup_ack_done);
576 hpriv->done = NULL;
577 588
578 dev_dbg(dev, "%s done\n", __func__); 589 dev_dbg(dev, "%s done\n", __func__);
579} 590}
@@ -724,11 +735,11 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
724 struct usbhsh_device *udev, *new_udev = NULL; 735 struct usbhsh_device *udev, *new_udev = NULL;
725 struct usbhs_pipe *pipe; 736 struct usbhs_pipe *pipe;
726 struct usbhsh_ep *uep; 737 struct usbhsh_ep *uep;
738 int is_dir_in = usb_pipein(urb->pipe);
727 739
728 int ret; 740 int ret;
729 741
730 dev_dbg(dev, "%s (%s)\n", 742 dev_dbg(dev, "%s (%s)\n", __func__, is_dir_in ? "in" : "out");
731 __func__, usb_pipein(urb->pipe) ? "in" : "out");
732 743
733 ret = usb_hcd_link_urb_to_ep(hcd, urb); 744 ret = usb_hcd_link_urb_to_ep(hcd, urb);
734 if (ret) 745 if (ret)
@@ -751,7 +762,8 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
751 */ 762 */
752 uep = usbhsh_ep_to_uep(ep); 763 uep = usbhsh_ep_to_uep(ep);
753 if (!uep) { 764 if (!uep) {
754 uep = usbhsh_endpoint_alloc(hpriv, udev, ep, mem_flags); 765 uep = usbhsh_endpoint_alloc(hpriv, udev, ep,
766 is_dir_in, mem_flags);
755 if (!uep) 767 if (!uep)
756 goto usbhsh_urb_enqueue_error_free_device; 768 goto usbhsh_urb_enqueue_error_free_device;
757 } 769 }
@@ -1095,10 +1107,7 @@ static int usbhsh_irq_setup_ack(struct usbhs_priv *priv,
1095 1107
1096 dev_dbg(dev, "setup packet OK\n"); 1108 dev_dbg(dev, "setup packet OK\n");
1097 1109
1098 if (unlikely(!hpriv->done)) 1110 complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
1099 dev_err(dev, "setup ack happen without necessary data\n");
1100 else
1101 complete(hpriv->done); /* see usbhsh_urb_enqueue() */
1102 1111
1103 return 0; 1112 return 0;
1104} 1113}
@@ -1111,10 +1120,7 @@ static int usbhsh_irq_setup_err(struct usbhs_priv *priv,
1111 1120
1112 dev_dbg(dev, "setup packet Err\n"); 1121 dev_dbg(dev, "setup packet Err\n");
1113 1122
1114 if (unlikely(!hpriv->done)) 1123 complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
1115 dev_err(dev, "setup err happen without necessary data\n");
1116 else
1117 complete(hpriv->done); /* see usbhsh_urb_enqueue() */
1118 1124
1119 return 0; 1125 return 0;
1120} 1126}
@@ -1221,8 +1227,18 @@ static int usbhsh_stop(struct usbhs_priv *priv)
1221{ 1227{
1222 struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv); 1228 struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
1223 struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv); 1229 struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
1230 struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1224 struct device *dev = usbhs_priv_to_dev(priv); 1231 struct device *dev = usbhs_priv_to_dev(priv);
1225 1232
1233 /*
1234 * disable irq callback
1235 */
1236 mod->irq_attch = NULL;
1237 mod->irq_dtch = NULL;
1238 mod->irq_sack = NULL;
1239 mod->irq_sign = NULL;
1240 usbhs_irq_callback_update(priv, mod);
1241
1226 usb_remove_hcd(hcd); 1242 usb_remove_hcd(hcd);
1227 1243
1228 /* disable sys */ 1244 /* disable sys */
@@ -1235,7 +1251,7 @@ static int usbhsh_stop(struct usbhs_priv *priv)
1235 return 0; 1251 return 0;
1236} 1252}
1237 1253
1238int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv) 1254int usbhs_mod_host_probe(struct usbhs_priv *priv)
1239{ 1255{
1240 struct usbhsh_hpriv *hpriv; 1256 struct usbhsh_hpriv *hpriv;
1241 struct usb_hcd *hcd; 1257 struct usb_hcd *hcd;
@@ -1251,6 +1267,7 @@ int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
1251 dev_err(dev, "Failed to create hcd\n"); 1267 dev_err(dev, "Failed to create hcd\n");
1252 return -ENOMEM; 1268 return -ENOMEM;
1253 } 1269 }
1270 hcd->has_tt = 1; /* for low/full speed */
1254 1271
1255 pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL); 1272 pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
1256 if (!pipe_info) { 1273 if (!pipe_info) {
@@ -1279,7 +1296,6 @@ int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
1279 hpriv->mod.stop = usbhsh_stop; 1296 hpriv->mod.stop = usbhsh_stop;
1280 hpriv->pipe_info = pipe_info; 1297 hpriv->pipe_info = pipe_info;
1281 hpriv->pipe_size = pipe_size; 1298 hpriv->pipe_size = pipe_size;
1282 hpriv->done = NULL;
1283 usbhsh_req_list_init(hpriv); 1299 usbhsh_req_list_init(hpriv);
1284 usbhsh_port_stat_init(hpriv); 1300 usbhsh_port_stat_init(hpriv);
1285 1301
@@ -1299,7 +1315,7 @@ usbhs_mod_host_probe_err:
1299 return -ENOMEM; 1315 return -ENOMEM;
1300} 1316}
1301 1317
1302int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv) 1318int usbhs_mod_host_remove(struct usbhs_priv *priv)
1303{ 1319{
1304 struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv); 1320 struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
1305 struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv); 1321 struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
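The mod_host changes replace a pointer to an on-stack DECLARE_COMPLETION() with a struct completion embedded in the host-private structure: it is re-armed with init_completion() before each setup packet is pushed, and the setup ACK/err interrupt handlers simply complete() it. A minimal sketch of that pattern with hypothetical names:

#include <linux/completion.h>

struct demo_host {
	struct completion setup_ack_done;	/* embedded, not a pointer */
};

/* Process context: send a setup packet and wait for the IRQ handler. */
static void demo_send_setup(struct demo_host *host)
{
	init_completion(&host->setup_ack_done);	/* re-arm for this transaction */
	/* ... write the setup packet to the hardware here ... */
	wait_for_completion(&host->setup_ack_done);
}

/* Interrupt context: the controller ACKed (or errored) the setup stage. */
static void demo_setup_irq(struct demo_host *host)
{
	complete(&host->setup_ack_done);
}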
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 5cdb9d912275..18e875b92e00 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -42,7 +42,7 @@ static int debug;
42 * Version information 42 * Version information
43 */ 43 */
44 44
45#define DRIVER_VERSION "v0.6" 45#define DRIVER_VERSION "v0.7"
46#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>" 46#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
47#define DRIVER_DESC "USB ARK3116 serial/IrDA driver" 47#define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
48#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA" 48#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
@@ -380,10 +380,6 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
380 goto err_out; 380 goto err_out;
381 } 381 }
382 382
383 /* setup termios */
384 if (tty)
385 ark3116_set_termios(tty, port, NULL);
386
387 /* remove any data still left: also clears error state */ 383 /* remove any data still left: also clears error state */
388 ark3116_read_reg(serial, UART_RX, buf); 384 ark3116_read_reg(serial, UART_RX, buf);
389 385
@@ -406,6 +402,10 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
406 /* enable DMA */ 402 /* enable DMA */
407 ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT); 403 ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT);
408 404
405 /* setup termios */
406 if (tty)
407 ark3116_set_termios(tty, port, NULL);
408
409err_out: 409err_out:
410 kfree(buf); 410 kfree(buf);
411 return result; 411 return result;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8fe034d2d3e7..ff3db5d056a5 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -736,6 +736,7 @@ static struct usb_device_id id_table_combined [] = {
736 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, 736 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
737 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, 737 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
738 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, 738 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
739 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
739 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), 740 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
740 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 741 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
741 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), 742 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
@@ -2104,13 +2105,19 @@ static void ftdi_set_termios(struct tty_struct *tty,
2104 2105
2105 cflag = termios->c_cflag; 2106 cflag = termios->c_cflag;
2106 2107
2107 /* FIXME -For this cut I don't care if the line is really changing or 2108 if (old_termios->c_cflag == termios->c_cflag
2108 not - so just do the change regardless - should be able to 2109 && old_termios->c_ispeed == termios->c_ispeed
2109 compare old_termios and tty->termios */ 2110 && old_termios->c_ospeed == termios->c_ospeed)
2111 goto no_c_cflag_changes;
2112
2110 /* NOTE These routines can get interrupted by 2113 /* NOTE These routines can get interrupted by
2111 ftdi_sio_read_bulk_callback - need to examine what this means - 2114 ftdi_sio_read_bulk_callback - need to examine what this means -
2112 don't see any problems yet */ 2115 don't see any problems yet */
2113 2116
2117 if ((old_termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)) ==
2118 (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)))
2119 goto no_data_parity_stop_changes;
2120
2114 /* Set number of data bits, parity, stop bits */ 2121 /* Set number of data bits, parity, stop bits */
2115 2122
2116 urb_value = 0; 2123 urb_value = 0;
@@ -2151,6 +2158,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2151 } 2158 }
2152 2159
2153 /* Now do the baudrate */ 2160 /* Now do the baudrate */
2161no_data_parity_stop_changes:
2154 if ((cflag & CBAUD) == B0) { 2162 if ((cflag & CBAUD) == B0) {
2155 /* Disable flow control */ 2163 /* Disable flow control */
2156 if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 2164 if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
@@ -2178,6 +2186,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2178 2186
2179 /* Set flow control */ 2187 /* Set flow control */
2180 /* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */ 2188 /* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */
2189no_c_cflag_changes:
2181 if (cflag & CRTSCTS) { 2190 if (cflag & CRTSCTS) {
2182 dbg("%s Setting to CRTSCTS flow control", __func__); 2191 dbg("%s Setting to CRTSCTS flow control", __func__);
2183 if (usb_control_msg(dev, 2192 if (usb_control_msg(dev,
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 571fa96b49c7..055b64ef0bba 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -112,6 +112,7 @@
112 112
113/* Propox devices */ 113/* Propox devices */
114#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 114#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
115#define FTDI_PROPOX_ISPCABLEIII_PID 0xD739
115 116
116/* Lenz LI-USB Computer Interface. */ 117/* Lenz LI-USB Computer Interface. */
117#define FTDI_LENZ_LIUSB_PID 0xD780 118#define FTDI_LENZ_LIUSB_PID 0xD780
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 89ae1f65e1b1..6dd64534fad0 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -156,6 +156,7 @@ static void option_instat_callback(struct urb *urb);
156#define HUAWEI_PRODUCT_K4511 0x14CC 156#define HUAWEI_PRODUCT_K4511 0x14CC
157#define HUAWEI_PRODUCT_ETS1220 0x1803 157#define HUAWEI_PRODUCT_ETS1220 0x1803
158#define HUAWEI_PRODUCT_E353 0x1506 158#define HUAWEI_PRODUCT_E353 0x1506
159#define HUAWEI_PRODUCT_E173S 0x1C05
159 160
160#define QUANTA_VENDOR_ID 0x0408 161#define QUANTA_VENDOR_ID 0x0408
161#define QUANTA_PRODUCT_Q101 0xEA02 162#define QUANTA_PRODUCT_Q101 0xEA02
@@ -316,6 +317,9 @@ static void option_instat_callback(struct urb *urb);
316#define ZTE_PRODUCT_AC8710 0xfff1 317#define ZTE_PRODUCT_AC8710 0xfff1
317#define ZTE_PRODUCT_AC2726 0xfff5 318#define ZTE_PRODUCT_AC2726 0xfff5
318#define ZTE_PRODUCT_AC8710T 0xffff 319#define ZTE_PRODUCT_AC8710T 0xffff
320#define ZTE_PRODUCT_MC2718 0xffe8
321#define ZTE_PRODUCT_AD3812 0xffeb
322#define ZTE_PRODUCT_MC2716 0xffed
319 323
320#define BENQ_VENDOR_ID 0x04a5 324#define BENQ_VENDOR_ID 0x04a5
321#define BENQ_PRODUCT_H10 0x4068 325#define BENQ_PRODUCT_H10 0x4068
@@ -468,6 +472,10 @@ static void option_instat_callback(struct urb *urb);
468#define YUGA_PRODUCT_CLU528 0x260D 472#define YUGA_PRODUCT_CLU528 0x260D
469#define YUGA_PRODUCT_CLU526 0x260F 473#define YUGA_PRODUCT_CLU526 0x260F
470 474
475/* Viettel products */
476#define VIETTEL_VENDOR_ID 0x2262
477#define VIETTEL_PRODUCT_VT1000 0x0002
478
471/* some devices interfaces need special handling due to a number of reasons */ 479/* some devices interfaces need special handling due to a number of reasons */
472enum option_blacklist_reason { 480enum option_blacklist_reason {
473 OPTION_BLACKLIST_NONE = 0, 481 OPTION_BLACKLIST_NONE = 0,
@@ -500,6 +508,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
500 .reserved = BIT(4), 508 .reserved = BIT(4),
501}; 509};
502 510
511static const struct option_blacklist_info zte_ad3812_z_blacklist = {
512 .sendsetup = BIT(0) | BIT(1) | BIT(2),
513};
514
515static const struct option_blacklist_info zte_mc2718_z_blacklist = {
516 .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
517};
518
519static const struct option_blacklist_info zte_mc2716_z_blacklist = {
520 .sendsetup = BIT(1) | BIT(2) | BIT(3),
521};
522
503static const struct option_blacklist_info huawei_cdc12_blacklist = { 523static const struct option_blacklist_info huawei_cdc12_blacklist = {
504 .reserved = BIT(1) | BIT(2), 524 .reserved = BIT(1) | BIT(2),
505}; 525};
@@ -622,6 +642,7 @@ static const struct usb_device_id option_ids[] = {
622 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, 642 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
623 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, 643 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
624 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, 644 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
645 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
625 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), 646 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
626 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, 647 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
627 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), 648 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
@@ -640,6 +661,14 @@ static const struct usb_device_id option_ids[] = {
640 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) }, 661 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
641 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) }, 662 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
642 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, 663 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
664 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
665 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
666 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
667 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
668 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
669 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
670 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
671 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
643 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, 672 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
644 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, 673 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
645 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, 674 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -726,6 +755,7 @@ static const struct usb_device_id option_ids[] = {
726 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 755 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
727 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ 756 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
728 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 757 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
758 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
729 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ 759 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
730 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, 760 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
731 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, 761 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
@@ -1043,6 +1073,12 @@ static const struct usb_device_id option_ids[] = {
1043 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, 1073 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
1044 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, 1074 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
1045 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, 1075 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
1076 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
1077 .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
1078 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
1079 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
1080 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
1081 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
1046 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 1082 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
1047 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, 1083 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
1048 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ 1084 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
@@ -1141,6 +1177,7 @@ static const struct usb_device_id option_ids[] = {
1141 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, 1177 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
1142 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, 1178 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
1143 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, 1179 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
1180 { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
1144 { } /* Terminating entry */ 1181 { } /* Terminating entry */
1145}; 1182};
1146MODULE_DEVICE_TABLE(usb, option_ids); 1183MODULE_DEVICE_TABLE(usb, option_ids);
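The option.c additions above all follow the same table pattern: match on vendor/product (and, where the modem exposes several interfaces, on interface class/subclass/protocol), and optionally attach an option_blacklist_info through driver_info so that certain interfaces are skipped or never sent the setup sequence. A hypothetical entry built the same way (the IDs and names below are placeholders, not real devices):

#define EXAMPLE_VENDOR_ID	0x1234
#define EXAMPLE_PRODUCT_ID	0x5678

static const struct option_blacklist_info example_blacklist = {
	.sendsetup = BIT(0),			/* no setup message on interface 0 */
	.reserved  = BIT(2),			/* never bind interface 2 */
};

static const struct usb_device_id example_ids[] = {
	{ USB_DEVICE_AND_INTERFACE_INFO(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_ID,
					0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&example_blacklist },
	{ }	/* terminating entry */
};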
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9083d1e616b4..fc2d66f7f4eb 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -91,7 +91,6 @@ static const struct usb_device_id id_table[] = {
91 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, 91 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
92 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, 92 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
93 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, 93 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
94 { USB_DEVICE(WINCHIPHEAD_VENDOR_ID, WINCHIPHEAD_USBSER_PRODUCT_ID) },
95 { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, 94 { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
96 { } /* Terminating entry */ 95 { } /* Terminating entry */
97}; 96};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 3d10d7f02072..c38b8c00c06f 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -145,10 +145,6 @@
145#define ADLINK_VENDOR_ID 0x0b63 145#define ADLINK_VENDOR_ID 0x0b63
146#define ADLINK_ND6530_PRODUCT_ID 0x6530 146#define ADLINK_ND6530_PRODUCT_ID 0x6530
147 147
148/* WinChipHead USB->RS 232 adapter */
149#define WINCHIPHEAD_VENDOR_ID 0x4348
150#define WINCHIPHEAD_USBSER_PRODUCT_ID 0x5523
151
152/* SMART USB Serial Adapter */ 148/* SMART USB Serial Adapter */
153#define SMART_VENDOR_ID 0x0b8c 149#define SMART_VENDOR_ID 0x0b8c
154#define SMART_PRODUCT_ID 0x2303 150#define SMART_PRODUCT_ID 0x2303
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 4dca3ef0668c..9fbe742343c6 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1762,10 +1762,9 @@ static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
1762 result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1); 1762 result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1);
1763 } else { 1763 } else {
1764 void *buf; 1764 void *buf;
1765 int offset; 1765 int offset = 0;
1766 u16 PhyBlockAddr; 1766 u16 PhyBlockAddr;
1767 u8 PageNum; 1767 u8 PageNum;
1768 u32 result;
1769 u16 len, oldphy, newphy; 1768 u16 len, oldphy, newphy;
1770 1769
1771 buf = kmalloc(blenByte, GFP_KERNEL); 1770 buf = kmalloc(blenByte, GFP_KERNEL);
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 93c1a4d86f51..82dd834709c7 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -59,7 +59,9 @@
59 59
60void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us) 60void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
61{ 61{
62 /* Pad the SCSI command with zeros out to 12 bytes 62 /*
63 * Pad the SCSI command with zeros out to 12 bytes. If the
64 * command already is 12 bytes or longer, leave it alone.
63 * 65 *
64 * NOTE: This only works because a scsi_cmnd struct field contains 66 * NOTE: This only works because a scsi_cmnd struct field contains
65 * a unsigned char cmnd[16], so we know we have storage available 67 * a unsigned char cmnd[16], so we know we have storage available
@@ -67,9 +69,6 @@ void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
67 for (; srb->cmd_len<12; srb->cmd_len++) 69 for (; srb->cmd_len<12; srb->cmd_len++)
68 srb->cmnd[srb->cmd_len] = 0; 70 srb->cmnd[srb->cmd_len] = 0;
69 71
70 /* set command length to 12 bytes */
71 srb->cmd_len = 12;
72
73 /* send the command to the transport layer */ 72 /* send the command to the transport layer */
74 usb_stor_invoke_transport(srb, us); 73 usb_stor_invoke_transport(srb, us);
75} 74}
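With the removal above, usb_stor_pad12_command() only pads commands shorter than 12 bytes and no longer overwrites the length of longer ones. A standalone sketch of the surviving loop (the function name is made up; cmnd is assumed to have at least 16 bytes of storage, as the original comment notes):

static void pad_cdb_to_12(unsigned char *cmnd, unsigned int *cmd_len)
{
	/* Zero-fill up to 12 bytes; commands already 12 bytes or longer are left alone. */
	for (; *cmd_len < 12; (*cmd_len)++)
		cmnd[*cmd_len] = 0;
}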
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 3041a974faf3..24caba79d722 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1854,6 +1854,13 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
1854 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1854 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1855 US_FL_IGNORE_RESIDUE ), 1855 US_FL_IGNORE_RESIDUE ),
1856 1856
1857/* Reported by Qinglin Ye <yestyle@gmail.com> */
1858UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
1859 "Kingston",
1860 "DT 101 G2",
1861 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1862 US_FL_BULK_IGNORE_TAG ),
1863
1857/* Reported by Francesco Foresti <frafore@tiscali.it> */ 1864/* Reported by Francesco Foresti <frafore@tiscali.it> */
1858UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201, 1865UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
1859 "Super Top", 1866 "Super Top",
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 55f91d9ab00b..29577bf1f559 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -116,6 +116,7 @@
116/* Clock registers available only on Version 2 */ 116/* Clock registers available only on Version 2 */
117#define LCD_CLK_ENABLE_REG 0x6c 117#define LCD_CLK_ENABLE_REG 0x6c
118#define LCD_CLK_RESET_REG 0x70 118#define LCD_CLK_RESET_REG 0x70
119#define LCD_CLK_MAIN_RESET BIT(3)
119 120
120#define LCD_NUM_BUFFERS 2 121#define LCD_NUM_BUFFERS 2
121 122
@@ -244,6 +245,10 @@ static inline void lcd_enable_raster(void)
244{ 245{
245 u32 reg; 246 u32 reg;
246 247
248 /* Bring LCDC out of reset */
249 if (lcd_revision == LCD_VERSION_2)
250 lcdc_write(0, LCD_CLK_RESET_REG);
251
247 reg = lcdc_read(LCD_RASTER_CTRL_REG); 252 reg = lcdc_read(LCD_RASTER_CTRL_REG);
248 if (!(reg & LCD_RASTER_ENABLE)) 253 if (!(reg & LCD_RASTER_ENABLE))
249 lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); 254 lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
@@ -257,6 +262,10 @@ static inline void lcd_disable_raster(void)
257 reg = lcdc_read(LCD_RASTER_CTRL_REG); 262 reg = lcdc_read(LCD_RASTER_CTRL_REG);
258 if (reg & LCD_RASTER_ENABLE) 263 if (reg & LCD_RASTER_ENABLE)
259 lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); 264 lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
265
266 if (lcd_revision == LCD_VERSION_2)
267 /* Write 1 to reset LCDC */
268 lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
260} 269}
261 270
262static void lcd_blit(int load_mode, struct da8xx_fb_par *par) 271static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
@@ -584,8 +593,12 @@ static void lcd_reset(struct da8xx_fb_par *par)
584 lcdc_write(0, LCD_DMA_CTRL_REG); 593 lcdc_write(0, LCD_DMA_CTRL_REG);
585 lcdc_write(0, LCD_RASTER_CTRL_REG); 594 lcdc_write(0, LCD_RASTER_CTRL_REG);
586 595
587 if (lcd_revision == LCD_VERSION_2) 596 if (lcd_revision == LCD_VERSION_2) {
588 lcdc_write(0, LCD_INT_ENABLE_SET_REG); 597 lcdc_write(0, LCD_INT_ENABLE_SET_REG);
598 /* Write 1 to reset */
599 lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
600 lcdc_write(0, LCD_CLK_RESET_REG);
601 }
589} 602}
590 603
591static void lcd_calc_clk_divider(struct da8xx_fb_par *par) 604static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
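On version-2 LCDC silicon the patch above brackets raster operation with the new clock reset register: clear LCD_CLK_RESET_REG before enabling the raster, and write LCD_CLK_MAIN_RESET after disabling it (lcd_reset() additionally pulses the bit). A condensed sketch of that sequencing, reusing the driver's lcdc_read()/lcdc_write() helpers (the function names here are illustrative):

static void lcdc_v2_enable_raster(void)
{
	lcdc_write(0, LCD_CLK_RESET_REG);		/* release the main reset */
	lcdc_write(lcdc_read(LCD_RASTER_CTRL_REG) | LCD_RASTER_ENABLE,
		   LCD_RASTER_CTRL_REG);
}

static void lcdc_v2_disable_raster(void)
{
	lcdc_write(lcdc_read(LCD_RASTER_CTRL_REG) & ~LCD_RASTER_ENABLE,
		   LCD_RASTER_CTRL_REG);
	lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);	/* hold in reset */
}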
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 0ccd7adf47bb..6f61e781f15a 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -19,6 +19,7 @@
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */ 20 */
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/module.h>
22#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
23#include <linux/mm.h> 24#include <linux/mm.h>
24#include <linux/vmalloc.h> 25#include <linux/vmalloc.h>
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 3532782551cb..5c81533eacaa 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -1720,12 +1720,11 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
1720 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); 1720 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
1721 unsigned long fclk = 0; 1721 unsigned long fclk = 0;
1722 1722
1723 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { 1723 if (width == out_width && height == out_height)
1724 if (width != out_width || height != out_height) 1724 return 0;
1725 return -EINVAL; 1725
1726 else 1726 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
1727 return 0; 1727 return -EINVAL;
1728 }
1729 1728
1730 if (out_width < width / maxdownscale || 1729 if (out_width < width / maxdownscale ||
1731 out_width > width * 8) 1730 out_width > width * 8)
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 3262f0f1fa35..c56378c555b0 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -269,7 +269,7 @@ static void update_hdmi_timings(struct hdmi_config *cfg,
269unsigned long hdmi_get_pixel_clock(void) 269unsigned long hdmi_get_pixel_clock(void)
270{ 270{
271 /* HDMI Pixel Clock in Mhz */ 271 /* HDMI Pixel Clock in Mhz */
272 return hdmi.ip_data.cfg.timings.timings.pixel_clock * 10000; 272 return hdmi.ip_data.cfg.timings.timings.pixel_clock * 1000;
273} 273}
274 274
275static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy, 275static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
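The hdmi.c fix above is a unit correction: timings.pixel_clock holds the pixel clock in kHz, so converting to Hz needs a factor of 1000. For a 74.25 MHz mode, for example, the field holds 74250 and the function should return 74250 * 1000 = 74250000; the old factor of 10000 reported ten times the real clock.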
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 69d882cbe709..c01c1c162726 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -559,8 +559,8 @@
559#define M1200X720_R60_VSP POSITIVE 559#define M1200X720_R60_VSP POSITIVE
560 560
561/* 1200x900@60 Sync Polarity (DCON) */ 561/* 1200x900@60 Sync Polarity (DCON) */
562#define M1200X900_R60_HSP NEGATIVE 562#define M1200X900_R60_HSP POSITIVE
563#define M1200X900_R60_VSP NEGATIVE 563#define M1200X900_R60_VSP POSITIVE
564 564
565/* 1280x600@60 Sync Polarity (GTF Mode) */ 565/* 1280x600@60 Sync Polarity (GTF Mode) */
566#define M1280x600_R60_HSP NEGATIVE 566#define M1280x600_R60_HSP NEGATIVE
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 816ed08e7cf3..1a61939b85fc 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -37,7 +37,7 @@ config VIRTIO_BALLOON
37 37
38 config VIRTIO_MMIO 38 config VIRTIO_MMIO
39 tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)" 39 tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
40 depends on EXPERIMENTAL 40 depends on HAS_IOMEM && EXPERIMENTAL
41 select VIRTIO 41 select VIRTIO
42 select VIRTIO_RING 42 select VIRTIO_RING
43 ---help--- 43 ---help---
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index acc5e43c373e..7317dc2ec426 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -118,7 +118,7 @@ static void vm_finalize_features(struct virtio_device *vdev)
118 vring_transport_features(vdev); 118 vring_transport_features(vdev);
119 119
120 for (i = 0; i < ARRAY_SIZE(vdev->features); i++) { 120 for (i = 0; i < ARRAY_SIZE(vdev->features); i++) {
121 writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SET); 121 writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
122 writel(vdev->features[i], 122 writel(vdev->features[i],
123 vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); 123 vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
124 } 124 }
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 79a31e5b4b68..03d1984bd363 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -169,11 +169,29 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
169 iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); 169 iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
170} 170}
171 171
172/* wait for pending irq handlers */
173static void vp_synchronize_vectors(struct virtio_device *vdev)
174{
175 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
176 int i;
177
178 if (vp_dev->intx_enabled)
179 synchronize_irq(vp_dev->pci_dev->irq);
180
181 for (i = 0; i < vp_dev->msix_vectors; ++i)
182 synchronize_irq(vp_dev->msix_entries[i].vector);
183}
184
172static void vp_reset(struct virtio_device *vdev) 185static void vp_reset(struct virtio_device *vdev)
173{ 186{
174 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 187 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
175 /* 0 status means a reset. */ 188 /* 0 status means a reset. */
176 iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); 189 iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
190 /* Flush out the status write, and flush in device writes,
191 * including MSI-X interrupts, if any. */
192 ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
193 /* Flush pending VQ/configuration callbacks. */
194 vp_synchronize_vectors(vdev);
177} 195}
178 196
179/* the notify function used when creating a virt queue */ 197/* the notify function used when creating a virt queue */
@@ -594,11 +612,11 @@ static struct virtio_config_ops virtio_pci_config_ops = {
594 612
595static void virtio_pci_release_dev(struct device *_d) 613static void virtio_pci_release_dev(struct device *_d)
596{ 614{
597 struct virtio_device *dev = container_of(_d, struct virtio_device, 615 /*
598 dev); 616 * No need for a release method as we allocate/free
599 struct virtio_pci_device *vp_dev = to_vp_device(dev); 617 * all devices together with the pci devices.
600 618 * Provide an empty one to avoid getting a warning from core.
601 kfree(vp_dev); 619 */
602} 620}
603 621
604/* the PCI probing function */ 622/* the PCI probing function */
@@ -686,6 +704,7 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
686 pci_iounmap(pci_dev, vp_dev->ioaddr); 704 pci_iounmap(pci_dev, vp_dev->ioaddr);
687 pci_release_regions(pci_dev); 705 pci_release_regions(pci_dev);
688 pci_disable_device(pci_dev); 706 pci_disable_device(pci_dev);
707 kfree(vp_dev);
689} 708}
690 709
691#ifdef CONFIG_PM 710#ifdef CONFIG_PM
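The virtio_pci changes above tighten reset ordering: after writing 0 to the status register, vp_reset() reads the register back to flush the posted write and then waits for any interrupt handlers still in flight; freeing of the device structure moves from the (now empty) release callback into virtio_pci_remove(). A sketch of the reset-side pattern, assuming the same vp_dev fields the driver uses (the function name is illustrative):

static void example_vp_reset(struct virtio_pci_device *vp_dev)
{
	int i;

	iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);	/* request reset */
	ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);		/* flush posted write */

	if (vp_dev->intx_enabled)				/* legacy INTx */
		synchronize_irq(vp_dev->pci_dev->irq);
	for (i = 0; i < vp_dev->msix_vectors; ++i)		/* every MSI-X vector */
		synchronize_irq(vp_dev->msix_entries[i].vector);
}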
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 6285867a9356..79fd606b7cd5 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -314,13 +314,6 @@ config NUC900_WATCHDOG
314 To compile this driver as a module, choose M here: the 314 To compile this driver as a module, choose M here: the
315 module will be called nuc900_wdt. 315 module will be called nuc900_wdt.
316 316
317config ADX_WATCHDOG
318 tristate "Avionic Design Xanthos watchdog"
319 depends on ARCH_PXA_ADX
320 help
321 Say Y here if you want support for the watchdog timer on Avionic
322 Design Xanthos boards.
323
324config TS72XX_WATCHDOG 317config TS72XX_WATCHDOG
325 tristate "TS-72XX SBC Watchdog" 318 tristate "TS-72XX SBC Watchdog"
326 depends on MACH_TS72XX 319 depends on MACH_TS72XX
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 55bd5740e910..fe893e91935b 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -51,7 +51,6 @@ obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
51obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o 51obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
52obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o 52obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
53obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o 53obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
54obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o
55obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o 54obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
56obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o 55obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
57 56
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c
deleted file mode 100644
index af6e6b16475a..000000000000
--- a/drivers/watchdog/adx_wdt.c
+++ /dev/null
@@ -1,355 +0,0 @@
1/*
2 * Copyright (C) 2008-2009 Avionic Design GmbH
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/fs.h>
10#include <linux/gfp.h>
11#include <linux/io.h>
12#include <linux/miscdevice.h>
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/types.h>
16#include <linux/uaccess.h>
17#include <linux/watchdog.h>
18
19#define WATCHDOG_NAME "adx-wdt"
20
21/* register offsets */
22#define ADX_WDT_CONTROL 0x00
23#define ADX_WDT_CONTROL_ENABLE (1 << 0)
24#define ADX_WDT_CONTROL_nRESET (1 << 1)
25#define ADX_WDT_TIMEOUT 0x08
26
27static struct platform_device *adx_wdt_dev;
28static unsigned long driver_open;
29
30#define WDT_STATE_STOP 0
31#define WDT_STATE_START 1
32
33struct adx_wdt {
34 void __iomem *base;
35 unsigned long timeout;
36 unsigned int state;
37 unsigned int wake;
38 spinlock_t lock;
39};
40
41static const struct watchdog_info adx_wdt_info = {
42 .identity = "Avionic Design Xanthos Watchdog",
43 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
44};
45
46static void adx_wdt_start_locked(struct adx_wdt *wdt)
47{
48 u32 ctrl;
49
50 ctrl = readl(wdt->base + ADX_WDT_CONTROL);
51 ctrl |= ADX_WDT_CONTROL_ENABLE;
52 writel(ctrl, wdt->base + ADX_WDT_CONTROL);
53 wdt->state = WDT_STATE_START;
54}
55
56static void adx_wdt_start(struct adx_wdt *wdt)
57{
58 unsigned long flags;
59
60 spin_lock_irqsave(&wdt->lock, flags);
61 adx_wdt_start_locked(wdt);
62 spin_unlock_irqrestore(&wdt->lock, flags);
63}
64
65static void adx_wdt_stop_locked(struct adx_wdt *wdt)
66{
67 u32 ctrl;
68
69 ctrl = readl(wdt->base + ADX_WDT_CONTROL);
70 ctrl &= ~ADX_WDT_CONTROL_ENABLE;
71 writel(ctrl, wdt->base + ADX_WDT_CONTROL);
72 wdt->state = WDT_STATE_STOP;
73}
74
75static void adx_wdt_stop(struct adx_wdt *wdt)
76{
77 unsigned long flags;
78
79 spin_lock_irqsave(&wdt->lock, flags);
80 adx_wdt_stop_locked(wdt);
81 spin_unlock_irqrestore(&wdt->lock, flags);
82}
83
84static void adx_wdt_set_timeout(struct adx_wdt *wdt, unsigned long seconds)
85{
86 unsigned long timeout = seconds * 1000;
87 unsigned long flags;
88 unsigned int state;
89
90 spin_lock_irqsave(&wdt->lock, flags);
91 state = wdt->state;
92 adx_wdt_stop_locked(wdt);
93 writel(timeout, wdt->base + ADX_WDT_TIMEOUT);
94
95 if (state == WDT_STATE_START)
96 adx_wdt_start_locked(wdt);
97
98 wdt->timeout = timeout;
99 spin_unlock_irqrestore(&wdt->lock, flags);
100}
101
102static void adx_wdt_get_timeout(struct adx_wdt *wdt, unsigned long *seconds)
103{
104 *seconds = wdt->timeout / 1000;
105}
106
107static void adx_wdt_keepalive(struct adx_wdt *wdt)
108{
109 unsigned long flags;
110
111 spin_lock_irqsave(&wdt->lock, flags);
112 writel(wdt->timeout, wdt->base + ADX_WDT_TIMEOUT);
113 spin_unlock_irqrestore(&wdt->lock, flags);
114}
115
116static int adx_wdt_open(struct inode *inode, struct file *file)
117{
118 struct adx_wdt *wdt = platform_get_drvdata(adx_wdt_dev);
119
120 if (test_and_set_bit(0, &driver_open))
121 return -EBUSY;
122
123 file->private_data = wdt;
124 adx_wdt_set_timeout(wdt, 30);
125 adx_wdt_start(wdt);
126
127 return nonseekable_open(inode, file);
128}
129
130static int adx_wdt_release(struct inode *inode, struct file *file)
131{
132 struct adx_wdt *wdt = file->private_data;
133
134 adx_wdt_stop(wdt);
135 clear_bit(0, &driver_open);
136
137 return 0;
138}
139
140static long adx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
141{
142 struct adx_wdt *wdt = file->private_data;
143 void __user *argp = (void __user *)arg;
144 unsigned long __user *p = argp;
145 unsigned long seconds = 0;
146 unsigned int options;
147 long ret = -EINVAL;
148
149 switch (cmd) {
150 case WDIOC_GETSUPPORT:
151 if (copy_to_user(argp, &adx_wdt_info, sizeof(adx_wdt_info)))
152 return -EFAULT;
153 else
154 return 0;
155
156 case WDIOC_GETSTATUS:
157 case WDIOC_GETBOOTSTATUS:
158 return put_user(0, p);
159
160 case WDIOC_KEEPALIVE:
161 adx_wdt_keepalive(wdt);
162 return 0;
163
164 case WDIOC_SETTIMEOUT:
165 if (get_user(seconds, p))
166 return -EFAULT;
167
168 adx_wdt_set_timeout(wdt, seconds);
169
170 /* fallthrough */
171 case WDIOC_GETTIMEOUT:
172 adx_wdt_get_timeout(wdt, &seconds);
173 return put_user(seconds, p);
174
175 case WDIOC_SETOPTIONS:
176 if (copy_from_user(&options, argp, sizeof(options)))
177 return -EFAULT;
178
179 if (options & WDIOS_DISABLECARD) {
180 adx_wdt_stop(wdt);
181 ret = 0;
182 }
183
184 if (options & WDIOS_ENABLECARD) {
185 adx_wdt_start(wdt);
186 ret = 0;
187 }
188
189 return ret;
190
191 default:
192 break;
193 }
194
195 return -ENOTTY;
196}
197
198static ssize_t adx_wdt_write(struct file *file, const char __user *data,
199 size_t len, loff_t *ppos)
200{
201 struct adx_wdt *wdt = file->private_data;
202
203 if (len)
204 adx_wdt_keepalive(wdt);
205
206 return len;
207}
208
209static const struct file_operations adx_wdt_fops = {
210 .owner = THIS_MODULE,
211 .llseek = no_llseek,
212 .open = adx_wdt_open,
213 .release = adx_wdt_release,
214 .unlocked_ioctl = adx_wdt_ioctl,
215 .write = adx_wdt_write,
216};
217
218static struct miscdevice adx_wdt_miscdev = {
219 .minor = WATCHDOG_MINOR,
220 .name = "watchdog",
221 .fops = &adx_wdt_fops,
222};
223
224static int __devinit adx_wdt_probe(struct platform_device *pdev)
225{
226 struct resource *res;
227 struct adx_wdt *wdt;
228 int ret = 0;
229 u32 ctrl;
230
231 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
232 if (!wdt) {
233 dev_err(&pdev->dev, "cannot allocate WDT structure\n");
234 return -ENOMEM;
235 }
236
237 spin_lock_init(&wdt->lock);
238
239 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
240 if (!res) {
241 dev_err(&pdev->dev, "cannot obtain I/O memory region\n");
242 return -ENXIO;
243 }
244
245 res = devm_request_mem_region(&pdev->dev, res->start,
246 resource_size(res), res->name);
247 if (!res) {
248 dev_err(&pdev->dev, "cannot request I/O memory region\n");
249 return -ENXIO;
250 }
251
252 wdt->base = devm_ioremap_nocache(&pdev->dev, res->start,
253 resource_size(res));
254 if (!wdt->base) {
255 dev_err(&pdev->dev, "cannot remap I/O memory region\n");
256 return -ENXIO;
257 }
258
259 /* disable watchdog and reboot on timeout */
260 ctrl = readl(wdt->base + ADX_WDT_CONTROL);
261 ctrl &= ~ADX_WDT_CONTROL_ENABLE;
262 ctrl &= ~ADX_WDT_CONTROL_nRESET;
263 writel(ctrl, wdt->base + ADX_WDT_CONTROL);
264
265 platform_set_drvdata(pdev, wdt);
266 adx_wdt_dev = pdev;
267
268 ret = misc_register(&adx_wdt_miscdev);
269 if (ret) {
270 dev_err(&pdev->dev, "cannot register miscdev on minor %d "
271 "(err=%d)\n", WATCHDOG_MINOR, ret);
272 return ret;
273 }
274
275 return 0;
276}
277
278static int __devexit adx_wdt_remove(struct platform_device *pdev)
279{
280 struct adx_wdt *wdt = platform_get_drvdata(pdev);
281
282 misc_deregister(&adx_wdt_miscdev);
283 adx_wdt_stop(wdt);
284 platform_set_drvdata(pdev, NULL);
285
286 return 0;
287}
288
289static void adx_wdt_shutdown(struct platform_device *pdev)
290{
291 struct adx_wdt *wdt = platform_get_drvdata(pdev);
292 adx_wdt_stop(wdt);
293}
294
295#ifdef CONFIG_PM
296static int adx_wdt_suspend(struct device *dev)
297{
298 struct platform_device *pdev = to_platform_device(dev);
299 struct adx_wdt *wdt = platform_get_drvdata(pdev);
300
301 wdt->wake = (wdt->state == WDT_STATE_START) ? 1 : 0;
302 adx_wdt_stop(wdt);
303
304 return 0;
305}
306
307static int adx_wdt_resume(struct device *dev)
308{
309 struct platform_device *pdev = to_platform_device(dev);
310 struct adx_wdt *wdt = platform_get_drvdata(pdev);
311
312 if (wdt->wake)
313 adx_wdt_start(wdt);
314
315 return 0;
316}
317
318static const struct dev_pm_ops adx_wdt_pm_ops = {
319 .suspend = adx_wdt_suspend,
320 .resume = adx_wdt_resume,
321};
322
323# define ADX_WDT_PM_OPS (&adx_wdt_pm_ops)
324#else
325# define ADX_WDT_PM_OPS NULL
326#endif
327
328static struct platform_driver adx_wdt_driver = {
329 .probe = adx_wdt_probe,
330 .remove = __devexit_p(adx_wdt_remove),
331 .shutdown = adx_wdt_shutdown,
332 .driver = {
333 .name = WATCHDOG_NAME,
334 .owner = THIS_MODULE,
335 .pm = ADX_WDT_PM_OPS,
336 },
337};
338
339static int __init adx_wdt_init(void)
340{
341 return platform_driver_register(&adx_wdt_driver);
342}
343
344static void __exit adx_wdt_exit(void)
345{
346 platform_driver_unregister(&adx_wdt_driver);
347}
348
349module_init(adx_wdt_init);
350module_exit(adx_wdt_exit);
351
352MODULE_DESCRIPTION("Avionic Design Xanthos Watchdog Driver");
353MODULE_LICENSE("GPL v2");
354MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
355MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 5de7e4fa5b8a..a79e3840782a 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -401,8 +401,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
401 401
402 dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n", 402 dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n",
403 (wtcon & S3C2410_WTCON_ENABLE) ? "" : "in", 403 (wtcon & S3C2410_WTCON_ENABLE) ? "" : "in",
404 (wtcon & S3C2410_WTCON_RSTEN) ? "" : "dis", 404 (wtcon & S3C2410_WTCON_RSTEN) ? "en" : "dis",
405 (wtcon & S3C2410_WTCON_INTEN) ? "" : "en"); 405 (wtcon & S3C2410_WTCON_INTEN) ? "en" : "dis");
406 406
407 return 0; 407 return 0;
408 408
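The s3c2410_wdt format arguments above now pair "en"/"dis" with the "%sabled" pattern for both the reset and interrupt bits. For example, with S3C2410_WTCON_RSTEN set and S3C2410_WTCON_INTEN clear, the old arguments printed "reset abled, irq enabled"; the corrected ones print "reset enabled, irq disabled".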
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 7be38556aed0..e789a47db41f 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -150,7 +150,7 @@ static int wm831x_wdt_set_timeout(struct watchdog_device *wdt_dev,
150 if (wm831x_wdt_cfgs[i].time == timeout) 150 if (wm831x_wdt_cfgs[i].time == timeout)
151 break; 151 break;
152 if (i == ARRAY_SIZE(wm831x_wdt_cfgs)) 152 if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
153 ret = -EINVAL; 153 return -EINVAL;
154 154
155 ret = wm831x_reg_unlock(wm831x); 155 ret = wm831x_reg_unlock(wm831x);
156 if (ret == 0) { 156 if (ret == 0) {
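The wm831x_wdt fix above turns a dead assignment into an early return: when the requested timeout is not found in wm831x_wdt_cfgs[], the function must bail out with -EINVAL instead of falling through and programming the register with an out-of-range index. Schematically, following the driver's own table and field names:

	for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
		if (wm831x_wdt_cfgs[i].time == timeout)
			break;
	if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
		return -EINVAL;		/* unsupported timeout: stop here */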
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index a767884a6c7a..31ab82fda38a 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -501,7 +501,7 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target);
501 * alloc_xenballooned_pages - get pages that have been ballooned out 501 * alloc_xenballooned_pages - get pages that have been ballooned out
502 * @nr_pages: Number of pages to get 502 * @nr_pages: Number of pages to get
503 * @pages: pages returned 503 * @pages: pages returned
504 * @highmem: highmem or lowmem pages 504 * @highmem: allow highmem pages
505 * @return 0 on success, error otherwise 505 * @return 0 on success, error otherwise
506 */ 506 */
507int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem) 507int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
@@ -511,7 +511,7 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
511 mutex_lock(&balloon_mutex); 511 mutex_lock(&balloon_mutex);
512 while (pgno < nr_pages) { 512 while (pgno < nr_pages) {
513 page = balloon_retrieve(highmem); 513 page = balloon_retrieve(highmem);
514 if (page && PageHighMem(page) == highmem) { 514 if (page && (highmem || !PageHighMem(page))) {
515 pages[pgno++] = page; 515 pages[pgno++] = page;
516 } else { 516 } else {
517 enum bp_state st; 517 enum bp_state st;
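The balloon.c test above is relaxed from strict equality to "highmem || !PageHighMem(page)": a caller that allows highmem accepts whatever balloon_retrieve() returns, while a caller that requires lowmem still rejects highmem pages. As a tiny predicate (the helper name is hypothetical):

static bool ballooned_page_usable(struct page *page, bool highmem)
{
	/* Lowmem pages are always fine; highmem pages only if the caller allows them. */
	return highmem || !PageHighMem(page);
}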
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index f6832f46aea4..e1c4c6e5b469 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -135,7 +135,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
135 /* Grant foreign access to the page. */ 135 /* Grant foreign access to the page. */
136 gref->gref_id = gnttab_grant_foreign_access(op->domid, 136 gref->gref_id = gnttab_grant_foreign_access(op->domid,
137 pfn_to_mfn(page_to_pfn(gref->page)), readonly); 137 pfn_to_mfn(page_to_pfn(gref->page)), readonly);
138 if (gref->gref_id < 0) { 138 if ((int)gref->gref_id < 0) {
139 rc = gref->gref_id; 139 rc = gref->gref_id;
140 goto undo; 140 goto undo;
141 } 141 }
@@ -280,7 +280,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
280 goto out; 280 goto out;
281 } 281 }
282 282
283 gref_ids = kzalloc(sizeof(gref_ids[0]) * op.count, GFP_TEMPORARY); 283 gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_TEMPORARY);
284 if (!gref_ids) { 284 if (!gref_ids) {
285 rc = -ENOMEM; 285 rc = -ENOMEM;
286 goto out; 286 goto out;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 39871326afa2..afca14d9042e 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -114,11 +114,11 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
114 if (NULL == add) 114 if (NULL == add)
115 return NULL; 115 return NULL;
116 116
117 add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL); 117 add->grants = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
118 add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL); 118 add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
119 add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL); 119 add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
120 add->kmap_ops = kzalloc(sizeof(add->kmap_ops[0]) * count, GFP_KERNEL); 120 add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
121 add->pages = kzalloc(sizeof(add->pages[0]) * count, GFP_KERNEL); 121 add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
122 if (NULL == add->grants || 122 if (NULL == add->grants ||
123 NULL == add->map_ops || 123 NULL == add->map_ops ||
124 NULL == add->unmap_ops || 124 NULL == add->unmap_ops ||
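The gntalloc and gntdev conversions above replace open-coded kzalloc(sizeof(x) * count, ...) with kcalloc(count, sizeof(x), ...), which zeroes the buffer the same way but also fails cleanly if count * size would overflow. A minimal sketch of the allocation pattern (names are illustrative):

static u32 *alloc_id_array(unsigned int count)
{
	/* kcalloc() returns NULL instead of wrapping on count * size overflow. */
	return kcalloc(count, sizeof(u32), GFP_KERNEL);
}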
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 4864e5d72e72..19e6a2041371 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -166,7 +166,7 @@ retry:
166 /* 166 /*
167 * Get IO TLB memory from any location. 167 * Get IO TLB memory from any location.
168 */ 168 */
169 xen_io_tlb_start = alloc_bootmem(bytes); 169 xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
170 if (!xen_io_tlb_start) { 170 if (!xen_io_tlb_start) {
171 m = "Cannot allocate Xen-SWIOTLB buffer!\n"; 171 m = "Cannot allocate Xen-SWIOTLB buffer!\n";
172 goto error; 172 goto error;
@@ -179,7 +179,7 @@ retry:
179 bytes, 179 bytes,
180 xen_io_tlb_nslabs); 180 xen_io_tlb_nslabs);
181 if (rc) { 181 if (rc) {
182 free_bootmem(__pa(xen_io_tlb_start), bytes); 182 free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
183 m = "Failed to get contiguous memory for DMA from Xen!\n"\ 183 m = "Failed to get contiguous memory for DMA from Xen!\n"\
184 "You either: don't have the permissions, do not have"\ 184 "You either: don't have the permissions, do not have"\
185 " enough free memory under 4GB, or the hypervisor memory"\ 185 " enough free memory under 4GB, or the hypervisor memory"\
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 81c3ce6b8bbe..1906125eab49 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -35,6 +35,7 @@
35#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
36#include <linux/export.h> 36#include <linux/export.h>
37#include <asm/xen/hypervisor.h> 37#include <asm/xen/hypervisor.h>
38#include <asm/xen/page.h>
38#include <xen/interface/xen.h> 39#include <xen/interface/xen.h>
39#include <xen/interface/event_channel.h> 40#include <xen/interface/event_channel.h>
40#include <xen/events.h> 41#include <xen/events.h>
@@ -436,19 +437,20 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
436int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr) 437int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
437{ 438{
438 struct gnttab_map_grant_ref op = { 439 struct gnttab_map_grant_ref op = {
439 .flags = GNTMAP_host_map, 440 .flags = GNTMAP_host_map | GNTMAP_contains_pte,
440 .ref = gnt_ref, 441 .ref = gnt_ref,
441 .dom = dev->otherend_id, 442 .dom = dev->otherend_id,
442 }; 443 };
443 struct vm_struct *area; 444 struct vm_struct *area;
445 pte_t *pte;
444 446
445 *vaddr = NULL; 447 *vaddr = NULL;
446 448
447 area = alloc_vm_area(PAGE_SIZE); 449 area = alloc_vm_area(PAGE_SIZE, &pte);
448 if (!area) 450 if (!area)
449 return -ENOMEM; 451 return -ENOMEM;
450 452
451 op.host_addr = (unsigned long)area->addr; 453 op.host_addr = arbitrary_virt_to_machine(pte).maddr;
452 454
453 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) 455 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
454 BUG(); 456 BUG();
@@ -527,6 +529,7 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
527 struct gnttab_unmap_grant_ref op = { 529 struct gnttab_unmap_grant_ref op = {
528 .host_addr = (unsigned long)vaddr, 530 .host_addr = (unsigned long)vaddr,
529 }; 531 };
532 unsigned int level;
530 533
531 /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr) 534 /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
532 * method so that we don't have to muck with vmalloc internals here. 535 * method so that we don't have to muck with vmalloc internals here.
@@ -548,6 +551,8 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
548 } 551 }
549 552
550 op.handle = (grant_handle_t)area->phys_addr; 553 op.handle = (grant_handle_t)area->phys_addr;
554 op.host_addr = arbitrary_virt_to_machine(
555 lookup_address((unsigned long)vaddr, &level)).maddr;
551 556
552 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) 557 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
553 BUG(); 558 BUG();
diff --git a/fs/bio.c b/fs/bio.c
index 41c93c722244..b1fe82cf88cf 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -337,7 +337,7 @@ static void bio_fs_destructor(struct bio *bio)
337 * RETURNS: 337 * RETURNS:
338 * Pointer to new bio on success, NULL on failure. 338 * Pointer to new bio on success, NULL on failure.
339 */ 339 */
340struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) 340struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
341{ 341{
342 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); 342 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
343 343
@@ -365,7 +365,7 @@ static void bio_kmalloc_destructor(struct bio *bio)
365 * %__GFP_WAIT, the allocation is guaranteed to succeed. 365 * %__GFP_WAIT, the allocation is guaranteed to succeed.
366 * 366 *
367 **/ 367 **/
368struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs) 368struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
369{ 369{
370 struct bio *bio; 370 struct bio *bio;
371 371
@@ -696,7 +696,8 @@ static void bio_free_map_data(struct bio_map_data *bmd)
696 kfree(bmd); 696 kfree(bmd);
697} 697}
698 698
699static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count, 699static struct bio_map_data *bio_alloc_map_data(int nr_segs,
700 unsigned int iov_count,
700 gfp_t gfp_mask) 701 gfp_t gfp_mask)
701{ 702{
702 struct bio_map_data *bmd; 703 struct bio_map_data *bmd;
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 7ec14097fef1..cb97174e2366 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
64 int idle; 64 int idle;
65}; 65};
66 66
67static int __btrfs_start_workers(struct btrfs_workers *workers);
68
67/* 69/*
68 * btrfs_start_workers uses kthread_run, which can block waiting for memory 70 * btrfs_start_workers uses kthread_run, which can block waiting for memory
69 * for a very long time. It will actually throttle on page writeback, 71 * for a very long time. It will actually throttle on page writeback,
@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
88{ 90{
89 struct worker_start *start; 91 struct worker_start *start;
90 start = container_of(work, struct worker_start, work); 92 start = container_of(work, struct worker_start, work);
91 btrfs_start_workers(start->queue, 1); 93 __btrfs_start_workers(start->queue);
92 kfree(start); 94 kfree(start);
93} 95}
94 96
95static int start_new_worker(struct btrfs_workers *queue)
96{
97 struct worker_start *start;
98 int ret;
99
100 start = kzalloc(sizeof(*start), GFP_NOFS);
101 if (!start)
102 return -ENOMEM;
103
104 start->work.func = start_new_worker_func;
105 start->queue = queue;
106 ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
107 if (ret)
108 kfree(start);
109 return ret;
110}
111
112/* 97/*
113 * helper function to move a thread onto the idle list after it 98 * helper function to move a thread onto the idle list after it
114 * has finished some requests. 99 * has finished some requests.
@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
153static void check_pending_worker_creates(struct btrfs_worker_thread *worker) 138static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
154{ 139{
155 struct btrfs_workers *workers = worker->workers; 140 struct btrfs_workers *workers = worker->workers;
141 struct worker_start *start;
156 unsigned long flags; 142 unsigned long flags;
157 143
158 rmb(); 144 rmb();
159 if (!workers->atomic_start_pending) 145 if (!workers->atomic_start_pending)
160 return; 146 return;
161 147
148 start = kzalloc(sizeof(*start), GFP_NOFS);
149 if (!start)
150 return;
151
152 start->work.func = start_new_worker_func;
153 start->queue = workers;
154
162 spin_lock_irqsave(&workers->lock, flags); 155 spin_lock_irqsave(&workers->lock, flags);
163 if (!workers->atomic_start_pending) 156 if (!workers->atomic_start_pending)
164 goto out; 157 goto out;
@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
170 163
171 workers->num_workers_starting += 1; 164 workers->num_workers_starting += 1;
172 spin_unlock_irqrestore(&workers->lock, flags); 165 spin_unlock_irqrestore(&workers->lock, flags);
173 start_new_worker(workers); 166 btrfs_queue_worker(workers->atomic_worker_start, &start->work);
174 return; 167 return;
175 168
176out: 169out:
170 kfree(start);
177 spin_unlock_irqrestore(&workers->lock, flags); 171 spin_unlock_irqrestore(&workers->lock, flags);
178} 172}
179 173
@@ -331,7 +325,7 @@ again:
331 run_ordered_completions(worker->workers, work); 325 run_ordered_completions(worker->workers, work);
332 326
333 check_pending_worker_creates(worker); 327 check_pending_worker_creates(worker);
334 328 cond_resched();
335 } 329 }
336 330
337 spin_lock_irq(&worker->lock); 331 spin_lock_irq(&worker->lock);
@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
462 * starts new worker threads. This does not enforce the max worker 456 * starts new worker threads. This does not enforce the max worker
463 * count in case you need to temporarily go past it. 457 * count in case you need to temporarily go past it.
464 */ 458 */
465static int __btrfs_start_workers(struct btrfs_workers *workers, 459static int __btrfs_start_workers(struct btrfs_workers *workers)
466 int num_workers)
467{ 460{
468 struct btrfs_worker_thread *worker; 461 struct btrfs_worker_thread *worker;
469 int ret = 0; 462 int ret = 0;
470 int i;
471 463
472 for (i = 0; i < num_workers; i++) { 464 worker = kzalloc(sizeof(*worker), GFP_NOFS);
473 worker = kzalloc(sizeof(*worker), GFP_NOFS); 465 if (!worker) {
474 if (!worker) { 466 ret = -ENOMEM;
475 ret = -ENOMEM; 467 goto fail;
476 goto fail; 468 }
477 }
478 469
479 INIT_LIST_HEAD(&worker->pending); 470 INIT_LIST_HEAD(&worker->pending);
480 INIT_LIST_HEAD(&worker->prio_pending); 471 INIT_LIST_HEAD(&worker->prio_pending);
481 INIT_LIST_HEAD(&worker->worker_list); 472 INIT_LIST_HEAD(&worker->worker_list);
482 spin_lock_init(&worker->lock); 473 spin_lock_init(&worker->lock);
483 474
484 atomic_set(&worker->num_pending, 0); 475 atomic_set(&worker->num_pending, 0);
485 atomic_set(&worker->refs, 1); 476 atomic_set(&worker->refs, 1);
486 worker->workers = workers; 477 worker->workers = workers;
487 worker->task = kthread_run(worker_loop, worker, 478 worker->task = kthread_run(worker_loop, worker,
488 "btrfs-%s-%d", workers->name, 479 "btrfs-%s-%d", workers->name,
489 workers->num_workers + i); 480 workers->num_workers + 1);
490 if (IS_ERR(worker->task)) { 481 if (IS_ERR(worker->task)) {
491 ret = PTR_ERR(worker->task); 482 ret = PTR_ERR(worker->task);
492 kfree(worker); 483 kfree(worker);
493 goto fail; 484 goto fail;
494 }
495 spin_lock_irq(&workers->lock);
496 list_add_tail(&worker->worker_list, &workers->idle_list);
497 worker->idle = 1;
498 workers->num_workers++;
499 workers->num_workers_starting--;
500 WARN_ON(workers->num_workers_starting < 0);
501 spin_unlock_irq(&workers->lock);
502 } 485 }
486 spin_lock_irq(&workers->lock);
487 list_add_tail(&worker->worker_list, &workers->idle_list);
488 worker->idle = 1;
489 workers->num_workers++;
490 workers->num_workers_starting--;
491 WARN_ON(workers->num_workers_starting < 0);
492 spin_unlock_irq(&workers->lock);
493
503 return 0; 494 return 0;
504fail: 495fail:
505 btrfs_stop_workers(workers); 496 spin_lock_irq(&workers->lock);
497 workers->num_workers_starting--;
498 spin_unlock_irq(&workers->lock);
506 return ret; 499 return ret;
507} 500}
508 501
509int btrfs_start_workers(struct btrfs_workers *workers, int num_workers) 502int btrfs_start_workers(struct btrfs_workers *workers)
510{ 503{
511 spin_lock_irq(&workers->lock); 504 spin_lock_irq(&workers->lock);
512 workers->num_workers_starting += num_workers; 505 workers->num_workers_starting++;
513 spin_unlock_irq(&workers->lock); 506 spin_unlock_irq(&workers->lock);
514 return __btrfs_start_workers(workers, num_workers); 507 return __btrfs_start_workers(workers);
515} 508}
516 509
517/* 510/*
@@ -568,6 +561,7 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
568 struct btrfs_worker_thread *worker; 561 struct btrfs_worker_thread *worker;
569 unsigned long flags; 562 unsigned long flags;
570 struct list_head *fallback; 563 struct list_head *fallback;
564 int ret;
571 565
572again: 566again:
573 spin_lock_irqsave(&workers->lock, flags); 567 spin_lock_irqsave(&workers->lock, flags);
@@ -584,7 +578,9 @@ again:
584 workers->num_workers_starting++; 578 workers->num_workers_starting++;
585 spin_unlock_irqrestore(&workers->lock, flags); 579 spin_unlock_irqrestore(&workers->lock, flags);
586 /* we're below the limit, start another worker */ 580 /* we're below the limit, start another worker */
587 __btrfs_start_workers(workers, 1); 581 ret = __btrfs_start_workers(workers);
582 if (ret)
583 goto fallback;
588 goto again; 584 goto again;
589 } 585 }
590 } 586 }
@@ -665,7 +661,7 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
665/* 661/*
666 * places a struct btrfs_work into the pending queue of one of the kthreads 662 * places a struct btrfs_work into the pending queue of one of the kthreads
667 */ 663 */
668int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work) 664void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
669{ 665{
670 struct btrfs_worker_thread *worker; 666 struct btrfs_worker_thread *worker;
671 unsigned long flags; 667 unsigned long flags;
@@ -673,7 +669,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
673 669
674 /* don't requeue something already on a list */ 670 /* don't requeue something already on a list */
675 if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags)) 671 if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
676 goto out; 672 return;
677 673
678 worker = find_worker(workers); 674 worker = find_worker(workers);
679 if (workers->ordered) { 675 if (workers->ordered) {
@@ -712,7 +708,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
712 if (wake) 708 if (wake)
713 wake_up_process(worker->task); 709 wake_up_process(worker->task);
714 spin_unlock_irqrestore(&worker->lock, flags); 710 spin_unlock_irqrestore(&worker->lock, flags);
715
716out:
717 return 0;
718} 711}
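The async-thread rework above changes the worker-start contract: __btrfs_start_workers() now creates exactly one worker and, on failure, drops num_workers_starting back down and returns the error instead of tearing down the whole pool; btrfs_queue_worker() becomes void; and find_worker() falls back to an existing worker when starting a new one fails. The caller-side pattern in find_worker() reduces to a sketch like this (the labels come from the surrounding function):

	ret = __btrfs_start_workers(workers);
	if (ret)
		goto fallback;	/* reuse an already-running worker */
	goto again;		/* a fresh worker exists, retry the lookup */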
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 5077746cf85e..f34cc31fa3c9 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -109,8 +109,8 @@ struct btrfs_workers {
109 char *name; 109 char *name;
110}; 110};
111 111
112int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work); 112void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
113int btrfs_start_workers(struct btrfs_workers *workers, int num_workers); 113int btrfs_start_workers(struct btrfs_workers *workers);
114int btrfs_stop_workers(struct btrfs_workers *workers); 114int btrfs_stop_workers(struct btrfs_workers *workers);
115void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max, 115void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
116 struct btrfs_workers *async_starter); 116 struct btrfs_workers *async_starter);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 8855aad3929c..22c64fff1bd5 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -683,7 +683,7 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
683 return PTR_ERR(fspath); 683 return PTR_ERR(fspath);
684 684
685 if (fspath > fspath_min) { 685 if (fspath > fspath_min) {
686 ipath->fspath->val[i] = (u64)fspath; 686 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
687 ++ipath->fspath->elem_cnt; 687 ++ipath->fspath->elem_cnt;
688 ipath->fspath->bytes_left = fspath - fspath_min; 688 ipath->fspath->bytes_left = fspath - fspath_min;
689 } else { 689 } else {
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 5a5d325a3935..634608d2a6d0 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -147,14 +147,12 @@ struct btrfs_inode {
147 * the btrfs file release call will add this inode to the 147 * the btrfs file release call will add this inode to the
148 * ordered operations list so that we make sure to flush out any 148 * ordered operations list so that we make sure to flush out any
149 * new data the application may have written before commit. 149 * new data the application may have written before commit.
150 *
151 * yes, its silly to have a single bitflag, but we might grow more
152 * of these.
153 */ 150 */
154 unsigned ordered_data_close:1; 151 unsigned ordered_data_close:1;
155 unsigned orphan_meta_reserved:1; 152 unsigned orphan_meta_reserved:1;
156 unsigned dummy_inode:1; 153 unsigned dummy_inode:1;
157 unsigned in_defrag:1; 154 unsigned in_defrag:1;
155 unsigned delalloc_meta_reserved:1;
158 156
159 /* 157 /*
160 * always compress this one file 158 * always compress this one file
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0fe615e4ea38..dede441bdeee 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -514,10 +514,25 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
514 struct btrfs_root *root, 514 struct btrfs_root *root,
515 struct extent_buffer *buf) 515 struct extent_buffer *buf)
516{ 516{
517 /* ensure we can see the force_cow */
518 smp_rmb();
519
520 /*
521 * We do not need to cow a block if
522 * 1) this block is not created or changed in this transaction;
 523 * 2) this block does not belong to the TREE_RELOC tree;
524 * 3) the root is not forced COW.
525 *
526 * What is forced COW:
 527 * when we create a snapshot during committing the transaction,
 528 * after we've finished copying the src root, we must COW the shared
 529 * block to ensure metadata consistency.
530 */
517 if (btrfs_header_generation(buf) == trans->transid && 531 if (btrfs_header_generation(buf) == trans->transid &&
518 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && 532 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
519 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && 533 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
520 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) 534 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
535 !root->force_cow)
521 return 0; 536 return 0;
522 return 1; 537 return 1;
523} 538}
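
The smp_rmb() added to should_cow_block() is only half of a barrier pair; the path that sets root->force_cow (snapshot creation during commit) has to publish the flag with a matching write barrier. The standalone C11 sketch below models that pairing with release/acquire atomics; force_cow here is a plain global standing in for the root field, and none of this is the actual btrfs code.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int force_cow;	/* stands in for root->force_cow */

/* snapshot-creation side: publish the flag before others re-check it */
static void mark_forced_cow(void)
{
	atomic_store_explicit(&force_cow, 1, memory_order_release);
}

/* COW-decision side: the acquire load plays the role of smp_rmb() */
static int should_cow(void)
{
	return atomic_load_explicit(&force_cow, memory_order_acquire);
}

int main(void)
{
	mark_forced_cow();
	printf("force_cow = %d\n", should_cow());
	return 0;
}
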
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b9ba59ff9292..67385033323d 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -848,7 +848,8 @@ struct btrfs_free_cluster {
848enum btrfs_caching_type { 848enum btrfs_caching_type {
849 BTRFS_CACHE_NO = 0, 849 BTRFS_CACHE_NO = 0,
850 BTRFS_CACHE_STARTED = 1, 850 BTRFS_CACHE_STARTED = 1,
851 BTRFS_CACHE_FINISHED = 2, 851 BTRFS_CACHE_FAST = 2,
852 BTRFS_CACHE_FINISHED = 3,
852}; 853};
853 854
854enum btrfs_disk_cache_state { 855enum btrfs_disk_cache_state {
@@ -1271,6 +1272,8 @@ struct btrfs_root {
1271 * for stat. It may be used for more later 1272 * for stat. It may be used for more later
1272 */ 1273 */
1273 dev_t anon_dev; 1274 dev_t anon_dev;
1275
1276 int force_cow;
1274}; 1277};
1275 1278
1276struct btrfs_ioctl_defrag_range_args { 1279struct btrfs_ioctl_defrag_range_args {
@@ -2366,6 +2369,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
2366int btrfs_block_rsv_refill(struct btrfs_root *root, 2369int btrfs_block_rsv_refill(struct btrfs_root *root,
2367 struct btrfs_block_rsv *block_rsv, 2370 struct btrfs_block_rsv *block_rsv,
2368 u64 min_reserved); 2371 u64 min_reserved);
2372int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
2373 struct btrfs_block_rsv *block_rsv,
2374 u64 min_reserved);
2369int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, 2375int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
2370 struct btrfs_block_rsv *dst_rsv, 2376 struct btrfs_block_rsv *dst_rsv,
2371 u64 num_bytes); 2377 u64 num_bytes);
@@ -2686,7 +2692,8 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
2686int btrfs_readpage(struct file *file, struct page *page); 2692int btrfs_readpage(struct file *file, struct page *page);
2687void btrfs_evict_inode(struct inode *inode); 2693void btrfs_evict_inode(struct inode *inode);
2688int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); 2694int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
2689void btrfs_dirty_inode(struct inode *inode, int flags); 2695int btrfs_dirty_inode(struct inode *inode);
2696int btrfs_update_time(struct file *file);
2690struct inode *btrfs_alloc_inode(struct super_block *sb); 2697struct inode *btrfs_alloc_inode(struct super_block *sb);
2691void btrfs_destroy_inode(struct inode *inode); 2698void btrfs_destroy_inode(struct inode *inode);
2692int btrfs_drop_inode(struct inode *inode); 2699int btrfs_drop_inode(struct inode *inode);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 3a1b939c9ae2..9c1eccc2c503 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -617,12 +617,14 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
617static int btrfs_delayed_inode_reserve_metadata( 617static int btrfs_delayed_inode_reserve_metadata(
618 struct btrfs_trans_handle *trans, 618 struct btrfs_trans_handle *trans,
619 struct btrfs_root *root, 619 struct btrfs_root *root,
620 struct inode *inode,
620 struct btrfs_delayed_node *node) 621 struct btrfs_delayed_node *node)
621{ 622{
622 struct btrfs_block_rsv *src_rsv; 623 struct btrfs_block_rsv *src_rsv;
623 struct btrfs_block_rsv *dst_rsv; 624 struct btrfs_block_rsv *dst_rsv;
624 u64 num_bytes; 625 u64 num_bytes;
625 int ret; 626 int ret;
627 int release = false;
626 628
627 src_rsv = trans->block_rsv; 629 src_rsv = trans->block_rsv;
628 dst_rsv = &root->fs_info->delayed_block_rsv; 630 dst_rsv = &root->fs_info->delayed_block_rsv;
@@ -638,8 +640,8 @@ static int btrfs_delayed_inode_reserve_metadata(
638 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since 640 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
639 * we're accounted for. 641 * we're accounted for.
640 */ 642 */
641 if (!trans->bytes_reserved && 643 if (!src_rsv || (!trans->bytes_reserved &&
642 src_rsv != &root->fs_info->delalloc_block_rsv) { 644 src_rsv != &root->fs_info->delalloc_block_rsv)) {
643 ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes); 645 ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
644 /* 646 /*
645 * Since we're under a transaction reserve_metadata_bytes could 647 * Since we're under a transaction reserve_metadata_bytes could
@@ -652,12 +654,65 @@ static int btrfs_delayed_inode_reserve_metadata(
652 if (!ret) 654 if (!ret)
653 node->bytes_reserved = num_bytes; 655 node->bytes_reserved = num_bytes;
654 return ret; 656 return ret;
657 } else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
658 spin_lock(&BTRFS_I(inode)->lock);
659 if (BTRFS_I(inode)->delalloc_meta_reserved) {
660 BTRFS_I(inode)->delalloc_meta_reserved = 0;
661 spin_unlock(&BTRFS_I(inode)->lock);
662 release = true;
663 goto migrate;
664 }
665 spin_unlock(&BTRFS_I(inode)->lock);
666
 667 /* Ok, we didn't have space pre-reserved. This shouldn't happen
 668 * too often, but it can happen if we do delalloc to an existing
 669 * inode which gets dirtied because of the time update, and then
 670 * isn't touched again until after the transaction commits and
 671 * then we try to write out the data. First try to be nice and
 672 * reserve something strictly for us. If not, be a pain and try
 673 * to steal from the delalloc block rsv.
674 */
675 ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
676 if (!ret)
677 goto out;
678
679 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
680 if (!ret)
681 goto out;
682
683 /*
684 * Ok this is a problem, let's just steal from the global rsv
685 * since this really shouldn't happen that often.
686 */
687 WARN_ON(1);
688 ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
689 dst_rsv, num_bytes);
690 goto out;
655 } 691 }
656 692
693migrate:
657 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); 694 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
695
696out:
 697 /*
 698 * Migrate only takes a reservation; it doesn't touch the size of the
 699 * block_rsv. This is to simplify callers that don't normally have things
 700 * migrated from their block rsv. If they go to release their
 701 * reservation, that will decrease the size as well, so if migrate
 702 * reduced the size we'd end up with a negative size. But for the
 703 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
 704 * but we could in fact do this reserve/migrate dance several times
 705 * between the time we did the original reservation and when we'd clean
 706 * it up. So to take care of this, release the space for the meta
 707 * reservation here. I think it may be time for a documentation page on
 708 * how block rsvs work.
 709 */
658 if (!ret) 710 if (!ret)
659 node->bytes_reserved = num_bytes; 711 node->bytes_reserved = num_bytes;
660 712
713 if (release)
714 btrfs_block_rsv_release(root, src_rsv, num_bytes);
715
661 return ret; 716 return ret;
662} 717}
663 718
@@ -1708,7 +1763,8 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1708 goto release_node; 1763 goto release_node;
1709 } 1764 }
1710 1765
1711 ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node); 1766 ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
1767 delayed_node);
1712 if (ret) 1768 if (ret)
1713 goto release_node; 1769 goto release_node;
1714 1770
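
The reserve path added above is essentially a three-step fallback: try a dedicated no-flush reservation, then migrate from the transaction's rsv, and only as a noisy last resort steal from the global rsv. Below is a minimal userspace sketch of that shape only; reserve_noflush() and migrate() are stubs invented for the example, not the btrfs functions.

#include <errno.h>
#include <stdio.h>

/* pretend the dedicated reservation never has room */
static int reserve_noflush(long bytes) { (void)bytes; return -ENOSPC; }
/* pretend migrating from any other pool works */
static int migrate(const char *from, long bytes) { (void)from; (void)bytes; return 0; }

static int reserve_for_delayed_inode(long bytes)
{
	int ret;

	ret = reserve_noflush(bytes);		/* 1) be nice: reserve just for us */
	if (!ret)
		return 0;

	ret = migrate("trans", bytes);		/* 2) take it from the transaction rsv */
	if (!ret)
		return 0;

	fprintf(stderr, "warning: stealing from the global rsv\n");
	return migrate("global", bytes);	/* 3) last resort */
}

int main(void)
{
	printf("ret = %d\n", reserve_for_delayed_inode(4096));
	return 0;
}
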
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 102c176fc29c..f44b3928dc2d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -620,7 +620,7 @@ out:
620 620
621static int btree_io_failed_hook(struct bio *failed_bio, 621static int btree_io_failed_hook(struct bio *failed_bio,
622 struct page *page, u64 start, u64 end, 622 struct page *page, u64 start, u64 end,
623 u64 mirror_num, struct extent_state *state) 623 int mirror_num, struct extent_state *state)
624{ 624{
625 struct extent_io_tree *tree; 625 struct extent_io_tree *tree;
626 unsigned long len; 626 unsigned long len;
@@ -1890,31 +1890,32 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1890 u64 features; 1890 u64 features;
1891 struct btrfs_key location; 1891 struct btrfs_key location;
1892 struct buffer_head *bh; 1892 struct buffer_head *bh;
1893 struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root), 1893 struct btrfs_super_block *disk_super;
1894 GFP_NOFS);
1895 struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1896 GFP_NOFS);
1897 struct btrfs_root *tree_root = btrfs_sb(sb); 1894 struct btrfs_root *tree_root = btrfs_sb(sb);
1898 struct btrfs_fs_info *fs_info = NULL; 1895 struct btrfs_fs_info *fs_info = tree_root->fs_info;
1899 struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), 1896 struct btrfs_root *extent_root;
1900 GFP_NOFS); 1897 struct btrfs_root *csum_root;
1901 struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), 1898 struct btrfs_root *chunk_root;
1902 GFP_NOFS); 1899 struct btrfs_root *dev_root;
1903 struct btrfs_root *log_tree_root; 1900 struct btrfs_root *log_tree_root;
1904
1905 int ret; 1901 int ret;
1906 int err = -EINVAL; 1902 int err = -EINVAL;
1907 int num_backups_tried = 0; 1903 int num_backups_tried = 0;
1908 int backup_index = 0; 1904 int backup_index = 0;
1909 1905
1910 struct btrfs_super_block *disk_super; 1906 extent_root = fs_info->extent_root =
1907 kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
1908 csum_root = fs_info->csum_root =
1909 kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
1910 chunk_root = fs_info->chunk_root =
1911 kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
1912 dev_root = fs_info->dev_root =
1913 kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
1911 1914
1912 if (!extent_root || !tree_root || !tree_root->fs_info || 1915 if (!extent_root || !csum_root || !chunk_root || !dev_root) {
1913 !chunk_root || !dev_root || !csum_root) {
1914 err = -ENOMEM; 1916 err = -ENOMEM;
1915 goto fail; 1917 goto fail;
1916 } 1918 }
1917 fs_info = tree_root->fs_info;
1918 1919
1919 ret = init_srcu_struct(&fs_info->subvol_srcu); 1920 ret = init_srcu_struct(&fs_info->subvol_srcu);
1920 if (ret) { 1921 if (ret) {
@@ -1954,12 +1955,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1954 mutex_init(&fs_info->reloc_mutex); 1955 mutex_init(&fs_info->reloc_mutex);
1955 1956
1956 init_completion(&fs_info->kobj_unregister); 1957 init_completion(&fs_info->kobj_unregister);
1957 fs_info->tree_root = tree_root;
1958 fs_info->extent_root = extent_root;
1959 fs_info->csum_root = csum_root;
1960 fs_info->chunk_root = chunk_root;
1961 fs_info->dev_root = dev_root;
1962 fs_info->fs_devices = fs_devices;
1963 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); 1958 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1964 INIT_LIST_HEAD(&fs_info->space_info); 1959 INIT_LIST_HEAD(&fs_info->space_info);
1965 btrfs_mapping_init(&fs_info->mapping_tree); 1960 btrfs_mapping_init(&fs_info->mapping_tree);
@@ -2199,19 +2194,27 @@ struct btrfs_root *open_ctree(struct super_block *sb,
2199 fs_info->endio_meta_write_workers.idle_thresh = 2; 2194 fs_info->endio_meta_write_workers.idle_thresh = 2;
2200 fs_info->readahead_workers.idle_thresh = 2; 2195 fs_info->readahead_workers.idle_thresh = 2;
2201 2196
2202 btrfs_start_workers(&fs_info->workers, 1); 2197 /*
2203 btrfs_start_workers(&fs_info->generic_worker, 1); 2198 * btrfs_start_workers can really only fail because of ENOMEM so just
2204 btrfs_start_workers(&fs_info->submit_workers, 1); 2199 * return -ENOMEM if any of these fail.
2205 btrfs_start_workers(&fs_info->delalloc_workers, 1); 2200 */
2206 btrfs_start_workers(&fs_info->fixup_workers, 1); 2201 ret = btrfs_start_workers(&fs_info->workers);
2207 btrfs_start_workers(&fs_info->endio_workers, 1); 2202 ret |= btrfs_start_workers(&fs_info->generic_worker);
2208 btrfs_start_workers(&fs_info->endio_meta_workers, 1); 2203 ret |= btrfs_start_workers(&fs_info->submit_workers);
2209 btrfs_start_workers(&fs_info->endio_meta_write_workers, 1); 2204 ret |= btrfs_start_workers(&fs_info->delalloc_workers);
2210 btrfs_start_workers(&fs_info->endio_write_workers, 1); 2205 ret |= btrfs_start_workers(&fs_info->fixup_workers);
2211 btrfs_start_workers(&fs_info->endio_freespace_worker, 1); 2206 ret |= btrfs_start_workers(&fs_info->endio_workers);
2212 btrfs_start_workers(&fs_info->delayed_workers, 1); 2207 ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
2213 btrfs_start_workers(&fs_info->caching_workers, 1); 2208 ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
2214 btrfs_start_workers(&fs_info->readahead_workers, 1); 2209 ret |= btrfs_start_workers(&fs_info->endio_write_workers);
2210 ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
2211 ret |= btrfs_start_workers(&fs_info->delayed_workers);
2212 ret |= btrfs_start_workers(&fs_info->caching_workers);
2213 ret |= btrfs_start_workers(&fs_info->readahead_workers);
2214 if (ret) {
2215 ret = -ENOMEM;
2216 goto fail_sb_buffer;
2217 }
2215 2218
2216 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); 2219 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2217 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, 2220 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2465,21 +2468,20 @@ fail_sb_buffer:
2465 btrfs_stop_workers(&fs_info->caching_workers); 2468 btrfs_stop_workers(&fs_info->caching_workers);
2466fail_alloc: 2469fail_alloc:
2467fail_iput: 2470fail_iput:
2471 btrfs_mapping_tree_free(&fs_info->mapping_tree);
2472
2468 invalidate_inode_pages2(fs_info->btree_inode->i_mapping); 2473 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2469 iput(fs_info->btree_inode); 2474 iput(fs_info->btree_inode);
2470
2471 btrfs_close_devices(fs_info->fs_devices);
2472 btrfs_mapping_tree_free(&fs_info->mapping_tree);
2473fail_bdi: 2475fail_bdi:
2474 bdi_destroy(&fs_info->bdi); 2476 bdi_destroy(&fs_info->bdi);
2475fail_srcu: 2477fail_srcu:
2476 cleanup_srcu_struct(&fs_info->subvol_srcu); 2478 cleanup_srcu_struct(&fs_info->subvol_srcu);
2477fail: 2479fail:
2480 btrfs_close_devices(fs_info->fs_devices);
2478 free_fs_info(fs_info); 2481 free_fs_info(fs_info);
2479 return ERR_PTR(err); 2482 return ERR_PTR(err);
2480 2483
2481recovery_tree_root: 2484recovery_tree_root:
2482
2483 if (!btrfs_test_opt(tree_root, RECOVERY)) 2485 if (!btrfs_test_opt(tree_root, RECOVERY))
2484 goto fail_tree_roots; 2486 goto fail_tree_roots;
2485 2487
@@ -2579,22 +2581,10 @@ static int write_dev_supers(struct btrfs_device *device,
2579 int errors = 0; 2581 int errors = 0;
2580 u32 crc; 2582 u32 crc;
2581 u64 bytenr; 2583 u64 bytenr;
2582 int last_barrier = 0;
2583 2584
2584 if (max_mirrors == 0) 2585 if (max_mirrors == 0)
2585 max_mirrors = BTRFS_SUPER_MIRROR_MAX; 2586 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2586 2587
2587 /* make sure only the last submit_bh does a barrier */
2588 if (do_barriers) {
2589 for (i = 0; i < max_mirrors; i++) {
2590 bytenr = btrfs_sb_offset(i);
2591 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
2592 device->total_bytes)
2593 break;
2594 last_barrier = i;
2595 }
2596 }
2597
2598 for (i = 0; i < max_mirrors; i++) { 2588 for (i = 0; i < max_mirrors; i++) {
2599 bytenr = btrfs_sb_offset(i); 2589 bytenr = btrfs_sb_offset(i);
2600 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) 2590 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
@@ -2640,17 +2630,136 @@ static int write_dev_supers(struct btrfs_device *device,
2640 bh->b_end_io = btrfs_end_buffer_write_sync; 2630 bh->b_end_io = btrfs_end_buffer_write_sync;
2641 } 2631 }
2642 2632
2643 if (i == last_barrier && do_barriers) 2633 /*
2644 ret = submit_bh(WRITE_FLUSH_FUA, bh); 2634 * we fua the first super. The others we allow
2645 else 2635 * to go down lazy.
2646 ret = submit_bh(WRITE_SYNC, bh); 2636 */
2647 2637 ret = submit_bh(WRITE_FUA, bh);
2648 if (ret) 2638 if (ret)
2649 errors++; 2639 errors++;
2650 } 2640 }
2651 return errors < i ? 0 : -1; 2641 return errors < i ? 0 : -1;
2652} 2642}
2653 2643
2644/*
 2645 * endio for write_dev_flush; this will wake anyone waiting
2646 * for the barrier when it is done
2647 */
2648static void btrfs_end_empty_barrier(struct bio *bio, int err)
2649{
2650 if (err) {
2651 if (err == -EOPNOTSUPP)
2652 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2653 clear_bit(BIO_UPTODATE, &bio->bi_flags);
2654 }
2655 if (bio->bi_private)
2656 complete(bio->bi_private);
2657 bio_put(bio);
2658}
2659
2660/*
 2661 * trigger flushes for one of the devices. If you pass wait == 0, the flushes are
2662 * sent down. With wait == 1, it waits for the previous flush.
2663 *
 2664 * any device where the flush fails with eopnotsupp is flagged as not-barrier
2665 * capable
2666 */
2667static int write_dev_flush(struct btrfs_device *device, int wait)
2668{
2669 struct bio *bio;
2670 int ret = 0;
2671
2672 if (device->nobarriers)
2673 return 0;
2674
2675 if (wait) {
2676 bio = device->flush_bio;
2677 if (!bio)
2678 return 0;
2679
2680 wait_for_completion(&device->flush_wait);
2681
2682 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
2683 printk("btrfs: disabling barriers on dev %s\n",
2684 device->name);
2685 device->nobarriers = 1;
2686 }
2687 if (!bio_flagged(bio, BIO_UPTODATE)) {
2688 ret = -EIO;
2689 }
2690
2691 /* drop the reference from the wait == 0 run */
2692 bio_put(bio);
2693 device->flush_bio = NULL;
2694
2695 return ret;
2696 }
2697
2698 /*
2699 * one reference for us, and we leave it for the
2700 * caller
2701 */
 2702 device->flush_bio = NULL;
2703 bio = bio_alloc(GFP_NOFS, 0);
2704 if (!bio)
2705 return -ENOMEM;
2706
2707 bio->bi_end_io = btrfs_end_empty_barrier;
2708 bio->bi_bdev = device->bdev;
2709 init_completion(&device->flush_wait);
2710 bio->bi_private = &device->flush_wait;
2711 device->flush_bio = bio;
2712
2713 bio_get(bio);
2714 submit_bio(WRITE_FLUSH, bio);
2715
2716 return 0;
2717}
2718
2719/*
2720 * send an empty flush down to each device in parallel,
2721 * then wait for them
2722 */
2723static int barrier_all_devices(struct btrfs_fs_info *info)
2724{
2725 struct list_head *head;
2726 struct btrfs_device *dev;
2727 int errors = 0;
2728 int ret;
2729
2730 /* send down all the barriers */
2731 head = &info->fs_devices->devices;
2732 list_for_each_entry_rcu(dev, head, dev_list) {
2733 if (!dev->bdev) {
2734 errors++;
2735 continue;
2736 }
2737 if (!dev->in_fs_metadata || !dev->writeable)
2738 continue;
2739
2740 ret = write_dev_flush(dev, 0);
2741 if (ret)
2742 errors++;
2743 }
2744
2745 /* wait for all the barriers */
2746 list_for_each_entry_rcu(dev, head, dev_list) {
2747 if (!dev->bdev) {
2748 errors++;
2749 continue;
2750 }
2751 if (!dev->in_fs_metadata || !dev->writeable)
2752 continue;
2753
2754 ret = write_dev_flush(dev, 1);
2755 if (ret)
2756 errors++;
2757 }
2758 if (errors)
2759 return -EIO;
2760 return 0;
2761}
2762
2654int write_all_supers(struct btrfs_root *root, int max_mirrors) 2763int write_all_supers(struct btrfs_root *root, int max_mirrors)
2655{ 2764{
2656 struct list_head *head; 2765 struct list_head *head;
@@ -2672,6 +2781,10 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2672 2781
2673 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2782 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2674 head = &root->fs_info->fs_devices->devices; 2783 head = &root->fs_info->fs_devices->devices;
2784
2785 if (do_barriers)
2786 barrier_all_devices(root->fs_info);
2787
2675 list_for_each_entry_rcu(dev, head, dev_list) { 2788 list_for_each_entry_rcu(dev, head, dev_list) {
2676 if (!dev->bdev) { 2789 if (!dev->bdev) {
2677 total_errors++; 2790 total_errors++;
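
barrier_all_devices() above follows a submit-all-then-wait-all shape: every device gets its flush issued before anyone is waited on, so the flushes run in parallel across devices. The userspace model below reproduces only that shape with one thread per fake device; the device struct and the flush body are invented for the example and are not the btrfs bio machinery.

#include <pthread.h>
#include <stdio.h>

#define NDEV 4

struct fake_dev {
	int id;
	int started;
	int flush_err;
	pthread_t flusher;
};

static void *do_flush(void *arg)
{
	struct fake_dev *dev = arg;
	dev->flush_err = 0;		/* pretend the cache flush succeeded */
	return NULL;
}

static int barrier_all(struct fake_dev *devs, int n)
{
	int i, errors = 0;

	/* send down all the flushes first */
	for (i = 0; i < n; i++) {
		devs[i].started = !pthread_create(&devs[i].flusher, NULL,
						  do_flush, &devs[i]);
		if (!devs[i].started)
			errors++;
	}

	/* then wait for every one and tally errors */
	for (i = 0; i < n; i++) {
		if (!devs[i].started)
			continue;
		pthread_join(devs[i].flusher, NULL);
		if (devs[i].flush_err)
			errors++;
	}
	return errors ? -1 : 0;
}

int main(void)
{
	struct fake_dev devs[NDEV] = { { .id = 0 } };
	printf("barrier_all -> %d\n", barrier_all(devs, NDEV));
	return 0;
}
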
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9879bd474632..f5fbe576d2ba 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -467,13 +467,59 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
467 struct btrfs_root *root, 467 struct btrfs_root *root,
468 int load_cache_only) 468 int load_cache_only)
469{ 469{
470 DEFINE_WAIT(wait);
470 struct btrfs_fs_info *fs_info = cache->fs_info; 471 struct btrfs_fs_info *fs_info = cache->fs_info;
471 struct btrfs_caching_control *caching_ctl; 472 struct btrfs_caching_control *caching_ctl;
472 int ret = 0; 473 int ret = 0;
473 474
474 smp_mb(); 475 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
475 if (cache->cached != BTRFS_CACHE_NO) 476 BUG_ON(!caching_ctl);
477
478 INIT_LIST_HEAD(&caching_ctl->list);
479 mutex_init(&caching_ctl->mutex);
480 init_waitqueue_head(&caching_ctl->wait);
481 caching_ctl->block_group = cache;
482 caching_ctl->progress = cache->key.objectid;
483 atomic_set(&caching_ctl->count, 1);
484 caching_ctl->work.func = caching_thread;
485
486 spin_lock(&cache->lock);
487 /*
 488 * This should be a rare occasion, but I think this could happen in the
489 * case where one thread starts to load the space cache info, and then
490 * some other thread starts a transaction commit which tries to do an
491 * allocation while the other thread is still loading the space cache
492 * info. The previous loop should have kept us from choosing this block
493 * group, but if we've moved to the state where we will wait on caching
494 * block groups we need to first check if we're doing a fast load here,
495 * so we can wait for it to finish, otherwise we could end up allocating
 496 * from a block group whose cache gets evicted for one reason or
497 * another.
498 */
499 while (cache->cached == BTRFS_CACHE_FAST) {
500 struct btrfs_caching_control *ctl;
501
502 ctl = cache->caching_ctl;
503 atomic_inc(&ctl->count);
504 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
505 spin_unlock(&cache->lock);
506
507 schedule();
508
509 finish_wait(&ctl->wait, &wait);
510 put_caching_control(ctl);
511 spin_lock(&cache->lock);
512 }
513
514 if (cache->cached != BTRFS_CACHE_NO) {
515 spin_unlock(&cache->lock);
516 kfree(caching_ctl);
476 return 0; 517 return 0;
518 }
519 WARN_ON(cache->caching_ctl);
520 cache->caching_ctl = caching_ctl;
521 cache->cached = BTRFS_CACHE_FAST;
522 spin_unlock(&cache->lock);
477 523
478 /* 524 /*
479 * We can't do the read from on-disk cache during a commit since we need 525 * We can't do the read from on-disk cache during a commit since we need
@@ -484,56 +530,51 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
484 if (trans && (!trans->transaction->in_commit) && 530 if (trans && (!trans->transaction->in_commit) &&
485 (root && root != root->fs_info->tree_root) && 531 (root && root != root->fs_info->tree_root) &&
486 btrfs_test_opt(root, SPACE_CACHE)) { 532 btrfs_test_opt(root, SPACE_CACHE)) {
487 spin_lock(&cache->lock);
488 if (cache->cached != BTRFS_CACHE_NO) {
489 spin_unlock(&cache->lock);
490 return 0;
491 }
492 cache->cached = BTRFS_CACHE_STARTED;
493 spin_unlock(&cache->lock);
494
495 ret = load_free_space_cache(fs_info, cache); 533 ret = load_free_space_cache(fs_info, cache);
496 534
497 spin_lock(&cache->lock); 535 spin_lock(&cache->lock);
498 if (ret == 1) { 536 if (ret == 1) {
537 cache->caching_ctl = NULL;
499 cache->cached = BTRFS_CACHE_FINISHED; 538 cache->cached = BTRFS_CACHE_FINISHED;
500 cache->last_byte_to_unpin = (u64)-1; 539 cache->last_byte_to_unpin = (u64)-1;
501 } else { 540 } else {
502 cache->cached = BTRFS_CACHE_NO; 541 if (load_cache_only) {
542 cache->caching_ctl = NULL;
543 cache->cached = BTRFS_CACHE_NO;
544 } else {
545 cache->cached = BTRFS_CACHE_STARTED;
546 }
503 } 547 }
504 spin_unlock(&cache->lock); 548 spin_unlock(&cache->lock);
549 wake_up(&caching_ctl->wait);
505 if (ret == 1) { 550 if (ret == 1) {
551 put_caching_control(caching_ctl);
506 free_excluded_extents(fs_info->extent_root, cache); 552 free_excluded_extents(fs_info->extent_root, cache);
507 return 0; 553 return 0;
508 } 554 }
555 } else {
556 /*
557 * We are not going to do the fast caching, set cached to the
558 * appropriate value and wakeup any waiters.
559 */
560 spin_lock(&cache->lock);
561 if (load_cache_only) {
562 cache->caching_ctl = NULL;
563 cache->cached = BTRFS_CACHE_NO;
564 } else {
565 cache->cached = BTRFS_CACHE_STARTED;
566 }
567 spin_unlock(&cache->lock);
568 wake_up(&caching_ctl->wait);
509 } 569 }
510 570
511 if (load_cache_only) 571 if (load_cache_only) {
512 return 0; 572 put_caching_control(caching_ctl);
513
514 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
515 BUG_ON(!caching_ctl);
516
517 INIT_LIST_HEAD(&caching_ctl->list);
518 mutex_init(&caching_ctl->mutex);
519 init_waitqueue_head(&caching_ctl->wait);
520 caching_ctl->block_group = cache;
521 caching_ctl->progress = cache->key.objectid;
522 /* one for caching kthread, one for caching block group list */
523 atomic_set(&caching_ctl->count, 2);
524 caching_ctl->work.func = caching_thread;
525
526 spin_lock(&cache->lock);
527 if (cache->cached != BTRFS_CACHE_NO) {
528 spin_unlock(&cache->lock);
529 kfree(caching_ctl);
530 return 0; 573 return 0;
531 } 574 }
532 cache->caching_ctl = caching_ctl;
533 cache->cached = BTRFS_CACHE_STARTED;
534 spin_unlock(&cache->lock);
535 575
536 down_write(&fs_info->extent_commit_sem); 576 down_write(&fs_info->extent_commit_sem);
577 atomic_inc(&caching_ctl->count);
537 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); 578 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
538 up_write(&fs_info->extent_commit_sem); 579 up_write(&fs_info->extent_commit_sem);
539 580
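
The new BTRFS_CACHE_FAST state introduces a classic wait loop: re-check the state under the lock, sleep, and loop until the fast load has moved on. The pthread sketch below mirrors that loop with a condition variable; the state names echo the enum above, but the locking objects and the loader thread are invented for the example.

#include <pthread.h>
#include <stdio.h>

enum { CACHE_NO, CACHE_FAST, CACHE_STARTED, CACHE_FINISHED };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static int cached = CACHE_FAST;

static void *fast_loader(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	cached = CACHE_FINISHED;	/* the fast load finished ... */
	pthread_cond_broadcast(&wake);	/* ... wake everyone waiting on it */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, fast_loader, NULL);

	pthread_mutex_lock(&lock);
	while (cached == CACHE_FAST)	/* like the loop added to cache_block_group() */
		pthread_cond_wait(&wake, &lock);
	pthread_mutex_unlock(&lock);

	printf("cached state = %d\n", cached);
	pthread_join(t, NULL);
	return 0;
}
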
@@ -2781,7 +2822,7 @@ out_free:
2781 btrfs_release_path(path); 2822 btrfs_release_path(path);
2782out: 2823out:
2783 spin_lock(&block_group->lock); 2824 spin_lock(&block_group->lock);
2784 if (!ret) 2825 if (!ret && dcs == BTRFS_DC_SETUP)
2785 block_group->cache_generation = trans->transid; 2826 block_group->cache_generation = trans->transid;
2786 block_group->disk_cache_state = dcs; 2827 block_group->disk_cache_state = dcs;
2787 spin_unlock(&block_group->lock); 2828 spin_unlock(&block_group->lock);
@@ -3797,16 +3838,16 @@ void btrfs_free_block_rsv(struct btrfs_root *root,
3797 kfree(rsv); 3838 kfree(rsv);
3798} 3839}
3799 3840
3800int btrfs_block_rsv_add(struct btrfs_root *root, 3841static inline int __block_rsv_add(struct btrfs_root *root,
3801 struct btrfs_block_rsv *block_rsv, 3842 struct btrfs_block_rsv *block_rsv,
3802 u64 num_bytes) 3843 u64 num_bytes, int flush)
3803{ 3844{
3804 int ret; 3845 int ret;
3805 3846
3806 if (num_bytes == 0) 3847 if (num_bytes == 0)
3807 return 0; 3848 return 0;
3808 3849
3809 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1); 3850 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
3810 if (!ret) { 3851 if (!ret) {
3811 block_rsv_add_bytes(block_rsv, num_bytes, 1); 3852 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3812 return 0; 3853 return 0;
@@ -3815,22 +3856,18 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
3815 return ret; 3856 return ret;
3816} 3857}
3817 3858
3859int btrfs_block_rsv_add(struct btrfs_root *root,
3860 struct btrfs_block_rsv *block_rsv,
3861 u64 num_bytes)
3862{
3863 return __block_rsv_add(root, block_rsv, num_bytes, 1);
3864}
3865
3818int btrfs_block_rsv_add_noflush(struct btrfs_root *root, 3866int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
3819 struct btrfs_block_rsv *block_rsv, 3867 struct btrfs_block_rsv *block_rsv,
3820 u64 num_bytes) 3868 u64 num_bytes)
3821{ 3869{
3822 int ret; 3870 return __block_rsv_add(root, block_rsv, num_bytes, 0);
3823
3824 if (num_bytes == 0)
3825 return 0;
3826
3827 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 0);
3828 if (!ret) {
3829 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3830 return 0;
3831 }
3832
3833 return ret;
3834} 3871}
3835 3872
3836int btrfs_block_rsv_check(struct btrfs_root *root, 3873int btrfs_block_rsv_check(struct btrfs_root *root,
@@ -3851,9 +3888,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
3851 return ret; 3888 return ret;
3852} 3889}
3853 3890
3854int btrfs_block_rsv_refill(struct btrfs_root *root, 3891static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
3855 struct btrfs_block_rsv *block_rsv, 3892 struct btrfs_block_rsv *block_rsv,
3856 u64 min_reserved) 3893 u64 min_reserved, int flush)
3857{ 3894{
3858 u64 num_bytes = 0; 3895 u64 num_bytes = 0;
3859 int ret = -ENOSPC; 3896 int ret = -ENOSPC;
@@ -3872,7 +3909,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
3872 if (!ret) 3909 if (!ret)
3873 return 0; 3910 return 0;
3874 3911
3875 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1); 3912 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
3876 if (!ret) { 3913 if (!ret) {
3877 block_rsv_add_bytes(block_rsv, num_bytes, 0); 3914 block_rsv_add_bytes(block_rsv, num_bytes, 0);
3878 return 0; 3915 return 0;
@@ -3881,6 +3918,20 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
3881 return ret; 3918 return ret;
3882} 3919}
3883 3920
3921int btrfs_block_rsv_refill(struct btrfs_root *root,
3922 struct btrfs_block_rsv *block_rsv,
3923 u64 min_reserved)
3924{
3925 return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
3926}
3927
3928int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
3929 struct btrfs_block_rsv *block_rsv,
3930 u64 min_reserved)
3931{
3932 return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
3933}
3934
3884int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, 3935int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3885 struct btrfs_block_rsv *dst_rsv, 3936 struct btrfs_block_rsv *dst_rsv,
3886 u64 num_bytes) 3937 u64 num_bytes)
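
The btrfs_block_rsv_add()/_add_noflush() and _refill()/_refill_noflush() changes are the same small refactor: fold two nearly identical bodies into one static helper that takes a flush flag and keep the exported names as one-line wrappers. A toy version of the pattern follows; the byte accounting is a stand-in, not reserve_metadata_bytes().

#include <errno.h>
#include <stdio.h>

static long free_bytes = 1024;

static int __reserve(long bytes, int flush)
{
	if (bytes == 0)
		return 0;
	if (bytes > free_bytes) {
		if (!flush)
			return -ENOSPC;	/* noflush callers just get told "no" */
		free_bytes += bytes;	/* pretend flushing reclaimed some space */
	}
	free_bytes -= bytes;
	return 0;
}

int reserve(long bytes)         { return __reserve(bytes, 1); }
int reserve_noflush(long bytes) { return __reserve(bytes, 0); }

int main(void)
{
	printf("%d %d\n", reserve_noflush(4096), reserve(4096));
	return 0;
}
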
@@ -4064,23 +4115,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4064 */ 4115 */
4065static unsigned drop_outstanding_extent(struct inode *inode) 4116static unsigned drop_outstanding_extent(struct inode *inode)
4066{ 4117{
4118 unsigned drop_inode_space = 0;
4067 unsigned dropped_extents = 0; 4119 unsigned dropped_extents = 0;
4068 4120
4069 BUG_ON(!BTRFS_I(inode)->outstanding_extents); 4121 BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4070 BTRFS_I(inode)->outstanding_extents--; 4122 BTRFS_I(inode)->outstanding_extents--;
4071 4123
4124 if (BTRFS_I(inode)->outstanding_extents == 0 &&
4125 BTRFS_I(inode)->delalloc_meta_reserved) {
4126 drop_inode_space = 1;
4127 BTRFS_I(inode)->delalloc_meta_reserved = 0;
4128 }
4129
4072 /* 4130 /*
 4073 * If we have more outstanding extents than we have reserved, or the 4131 * If we have more outstanding extents than we have reserved, or the
 4074 * same number, then we need to leave the reserved extents count alone. 4132 * same number, then we need to leave the reserved extents count alone.
4075 */ 4133 */
4076 if (BTRFS_I(inode)->outstanding_extents >= 4134 if (BTRFS_I(inode)->outstanding_extents >=
4077 BTRFS_I(inode)->reserved_extents) 4135 BTRFS_I(inode)->reserved_extents)
4078 return 0; 4136 return drop_inode_space;
4079 4137
4080 dropped_extents = BTRFS_I(inode)->reserved_extents - 4138 dropped_extents = BTRFS_I(inode)->reserved_extents -
4081 BTRFS_I(inode)->outstanding_extents; 4139 BTRFS_I(inode)->outstanding_extents;
4082 BTRFS_I(inode)->reserved_extents -= dropped_extents; 4140 BTRFS_I(inode)->reserved_extents -= dropped_extents;
4083 return dropped_extents; 4141 return dropped_extents + drop_inode_space;
4084} 4142}
4085 4143
4086/** 4144/**
@@ -4146,12 +4204,17 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4146 struct btrfs_root *root = BTRFS_I(inode)->root; 4204 struct btrfs_root *root = BTRFS_I(inode)->root;
4147 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; 4205 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4148 u64 to_reserve = 0; 4206 u64 to_reserve = 0;
4207 u64 csum_bytes;
4149 unsigned nr_extents = 0; 4208 unsigned nr_extents = 0;
4209 int extra_reserve = 0;
4150 int flush = 1; 4210 int flush = 1;
4151 int ret; 4211 int ret;
4152 4212
 4213 /* Need to be holding the i_mutex here if we aren't the free space cache inode */
4153 if (btrfs_is_free_space_inode(root, inode)) 4214 if (btrfs_is_free_space_inode(root, inode))
4154 flush = 0; 4215 flush = 0;
4216 else
4217 WARN_ON(!mutex_is_locked(&inode->i_mutex));
4155 4218
4156 if (flush && btrfs_transaction_in_commit(root->fs_info)) 4219 if (flush && btrfs_transaction_in_commit(root->fs_info))
4157 schedule_timeout(1); 4220 schedule_timeout(1);
@@ -4162,14 +4225,22 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4162 BTRFS_I(inode)->outstanding_extents++; 4225 BTRFS_I(inode)->outstanding_extents++;
4163 4226
4164 if (BTRFS_I(inode)->outstanding_extents > 4227 if (BTRFS_I(inode)->outstanding_extents >
4165 BTRFS_I(inode)->reserved_extents) { 4228 BTRFS_I(inode)->reserved_extents)
4166 nr_extents = BTRFS_I(inode)->outstanding_extents - 4229 nr_extents = BTRFS_I(inode)->outstanding_extents -
4167 BTRFS_I(inode)->reserved_extents; 4230 BTRFS_I(inode)->reserved_extents;
4168 BTRFS_I(inode)->reserved_extents += nr_extents;
4169 4231
4170 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents); 4232 /*
4233 * Add an item to reserve for updating the inode when we complete the
4234 * delalloc io.
4235 */
4236 if (!BTRFS_I(inode)->delalloc_meta_reserved) {
4237 nr_extents++;
4238 extra_reserve = 1;
4171 } 4239 }
4240
4241 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4172 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1); 4242 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4243 csum_bytes = BTRFS_I(inode)->csum_bytes;
4173 spin_unlock(&BTRFS_I(inode)->lock); 4244 spin_unlock(&BTRFS_I(inode)->lock);
4174 4245
4175 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); 4246 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
@@ -4179,22 +4250,35 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4179 4250
4180 spin_lock(&BTRFS_I(inode)->lock); 4251 spin_lock(&BTRFS_I(inode)->lock);
4181 dropped = drop_outstanding_extent(inode); 4252 dropped = drop_outstanding_extent(inode);
4182 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4183 spin_unlock(&BTRFS_I(inode)->lock);
4184 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4185
4186 /* 4253 /*
 4187 * Somebody could have come in and twiddled with the 4254 * If the inode's csum_bytes is the same as the original
4188 * reservation, so if we have to free more than we would have 4255 * csum_bytes then we know we haven't raced with any free()ers
 4189 * reserved from this reservation go ahead and release those 4256 * so we can just reduce our inode's csum bytes and carry on.
4190 * bytes. 4257 * Otherwise we have to do the normal free thing to account for
4258 * the case that the free side didn't free up its reserve
4259 * because of this outstanding reservation.
4191 */ 4260 */
4192 to_free -= to_reserve; 4261 if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4262 calc_csum_metadata_size(inode, num_bytes, 0);
4263 else
4264 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4265 spin_unlock(&BTRFS_I(inode)->lock);
4266 if (dropped)
4267 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4268
4193 if (to_free) 4269 if (to_free)
4194 btrfs_block_rsv_release(root, block_rsv, to_free); 4270 btrfs_block_rsv_release(root, block_rsv, to_free);
4195 return ret; 4271 return ret;
4196 } 4272 }
4197 4273
4274 spin_lock(&BTRFS_I(inode)->lock);
4275 if (extra_reserve) {
4276 BTRFS_I(inode)->delalloc_meta_reserved = 1;
4277 nr_extents--;
4278 }
4279 BTRFS_I(inode)->reserved_extents += nr_extents;
4280 spin_unlock(&BTRFS_I(inode)->lock);
4281
4198 block_rsv_add_bytes(block_rsv, to_reserve, 1); 4282 block_rsv_add_bytes(block_rsv, to_reserve, 1);
4199 4283
4200 return 0; 4284 return 0;
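
The error path above uses an optimistic race check: remember csum_bytes while still holding the lock, and on cleanup take the cheap accounting-only path only if the value is unchanged, meaning no free()er raced with us. The sketch below shows just that check with a pthread mutex; the counter, the two cleanup helpers, and the simulated failure are all placeholders, not the btrfs accounting.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;
static long csum_bytes = 8192;

static void cheap_undo(void) { /* just drop our own accounting */ }
static void full_undo(void)  { /* someone else changed things, recompute */ }

static void reserve_failed(long num_bytes)
{
	long snapshot;

	pthread_mutex_lock(&ilock);
	snapshot = csum_bytes;
	csum_bytes += num_bytes;	/* our tentative accounting */
	pthread_mutex_unlock(&ilock);

	/* ... the actual reservation failed somewhere in here ... */

	pthread_mutex_lock(&ilock);
	csum_bytes -= num_bytes;
	if (csum_bytes == snapshot)
		cheap_undo();		/* nobody raced with us */
	else
		full_undo();		/* someone freed in the meantime */
	pthread_mutex_unlock(&ilock);
}

int main(void)
{
	reserve_failed(4096);
	printf("csum_bytes = %ld\n", csum_bytes);
	return 0;
}
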
@@ -5040,11 +5124,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5040 struct btrfs_root *root = orig_root->fs_info->extent_root; 5124 struct btrfs_root *root = orig_root->fs_info->extent_root;
5041 struct btrfs_free_cluster *last_ptr = NULL; 5125 struct btrfs_free_cluster *last_ptr = NULL;
5042 struct btrfs_block_group_cache *block_group = NULL; 5126 struct btrfs_block_group_cache *block_group = NULL;
5127 struct btrfs_block_group_cache *used_block_group;
5043 int empty_cluster = 2 * 1024 * 1024; 5128 int empty_cluster = 2 * 1024 * 1024;
5044 int allowed_chunk_alloc = 0; 5129 int allowed_chunk_alloc = 0;
5045 int done_chunk_alloc = 0; 5130 int done_chunk_alloc = 0;
5046 struct btrfs_space_info *space_info; 5131 struct btrfs_space_info *space_info;
5047 int last_ptr_loop = 0;
5048 int loop = 0; 5132 int loop = 0;
5049 int index = 0; 5133 int index = 0;
5050 int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ? 5134 int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
@@ -5106,6 +5190,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5106ideal_cache: 5190ideal_cache:
5107 block_group = btrfs_lookup_block_group(root->fs_info, 5191 block_group = btrfs_lookup_block_group(root->fs_info,
5108 search_start); 5192 search_start);
5193 used_block_group = block_group;
5109 /* 5194 /*
5110 * we don't want to use the block group if it doesn't match our 5195 * we don't want to use the block group if it doesn't match our
5111 * allocation bits, or if its not cached. 5196 * allocation bits, or if its not cached.
@@ -5143,6 +5228,7 @@ search:
5143 u64 offset; 5228 u64 offset;
5144 int cached; 5229 int cached;
5145 5230
5231 used_block_group = block_group;
5146 btrfs_get_block_group(block_group); 5232 btrfs_get_block_group(block_group);
5147 search_start = block_group->key.objectid; 5233 search_start = block_group->key.objectid;
5148 5234
@@ -5166,13 +5252,15 @@ search:
5166 } 5252 }
5167 5253
5168have_block_group: 5254have_block_group:
5169 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { 5255 cached = block_group_cache_done(block_group);
5256 if (unlikely(!cached)) {
5170 u64 free_percent; 5257 u64 free_percent;
5171 5258
5259 found_uncached_bg = true;
5172 ret = cache_block_group(block_group, trans, 5260 ret = cache_block_group(block_group, trans,
5173 orig_root, 1); 5261 orig_root, 1);
5174 if (block_group->cached == BTRFS_CACHE_FINISHED) 5262 if (block_group->cached == BTRFS_CACHE_FINISHED)
5175 goto have_block_group; 5263 goto alloc;
5176 5264
5177 free_percent = btrfs_block_group_used(&block_group->item); 5265 free_percent = btrfs_block_group_used(&block_group->item);
5178 free_percent *= 100; 5266 free_percent *= 100;
@@ -5194,7 +5282,6 @@ have_block_group:
5194 orig_root, 0); 5282 orig_root, 0);
5195 BUG_ON(ret); 5283 BUG_ON(ret);
5196 } 5284 }
5197 found_uncached_bg = true;
5198 5285
5199 /* 5286 /*
5200 * If loop is set for cached only, try the next block 5287 * If loop is set for cached only, try the next block
@@ -5204,94 +5291,80 @@ have_block_group:
5204 goto loop; 5291 goto loop;
5205 } 5292 }
5206 5293
5207 cached = block_group_cache_done(block_group); 5294alloc:
5208 if (unlikely(!cached))
5209 found_uncached_bg = true;
5210
5211 if (unlikely(block_group->ro)) 5295 if (unlikely(block_group->ro))
5212 goto loop; 5296 goto loop;
5213 5297
5214 spin_lock(&block_group->free_space_ctl->tree_lock); 5298 spin_lock(&block_group->free_space_ctl->tree_lock);
5215 if (cached && 5299 if (cached &&
5216 block_group->free_space_ctl->free_space < 5300 block_group->free_space_ctl->free_space <
5217 num_bytes + empty_size) { 5301 num_bytes + empty_cluster + empty_size) {
5218 spin_unlock(&block_group->free_space_ctl->tree_lock); 5302 spin_unlock(&block_group->free_space_ctl->tree_lock);
5219 goto loop; 5303 goto loop;
5220 } 5304 }
5221 spin_unlock(&block_group->free_space_ctl->tree_lock); 5305 spin_unlock(&block_group->free_space_ctl->tree_lock);
5222 5306
5223 /* 5307 /*
 5224 * Ok we want to try and use the cluster allocator, so let's look 5308 * Ok we want to try and use the cluster allocator, so
 5225 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will 5309 * let's look there
5226 * have tried the cluster allocator plenty of times at this
5227 * point and not have found anything, so we are likely way too
5228 * fragmented for the clustering stuff to find anything, so lets
5229 * just skip it and let the allocator find whatever block it can
5230 * find
5231 */ 5310 */
5232 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) { 5311 if (last_ptr) {
5233 /* 5312 /*
5234 * the refill lock keeps out other 5313 * the refill lock keeps out other
5235 * people trying to start a new cluster 5314 * people trying to start a new cluster
5236 */ 5315 */
5237 spin_lock(&last_ptr->refill_lock); 5316 spin_lock(&last_ptr->refill_lock);
5238 if (last_ptr->block_group && 5317 used_block_group = last_ptr->block_group;
5239 (last_ptr->block_group->ro || 5318 if (used_block_group != block_group &&
5240 !block_group_bits(last_ptr->block_group, data))) { 5319 (!used_block_group ||
5241 offset = 0; 5320 used_block_group->ro ||
5321 !block_group_bits(used_block_group, data))) {
5322 used_block_group = block_group;
5242 goto refill_cluster; 5323 goto refill_cluster;
5243 } 5324 }
5244 5325
5245 offset = btrfs_alloc_from_cluster(block_group, last_ptr, 5326 if (used_block_group != block_group)
5246 num_bytes, search_start); 5327 btrfs_get_block_group(used_block_group);
5328
5329 offset = btrfs_alloc_from_cluster(used_block_group,
5330 last_ptr, num_bytes, used_block_group->key.objectid);
5247 if (offset) { 5331 if (offset) {
5248 /* we have a block, we're done */ 5332 /* we have a block, we're done */
5249 spin_unlock(&last_ptr->refill_lock); 5333 spin_unlock(&last_ptr->refill_lock);
5250 goto checks; 5334 goto checks;
5251 } 5335 }
5252 5336
5253 spin_lock(&last_ptr->lock); 5337 WARN_ON(last_ptr->block_group != used_block_group);
5254 /* 5338 if (used_block_group != block_group) {
5255 * whoops, this cluster doesn't actually point to 5339 btrfs_put_block_group(used_block_group);
5256 * this block group. Get a ref on the block 5340 used_block_group = block_group;
5257 * group is does point to and try again
5258 */
5259 if (!last_ptr_loop && last_ptr->block_group &&
5260 last_ptr->block_group != block_group &&
5261 index <=
5262 get_block_group_index(last_ptr->block_group)) {
5263
5264 btrfs_put_block_group(block_group);
5265 block_group = last_ptr->block_group;
5266 btrfs_get_block_group(block_group);
5267 spin_unlock(&last_ptr->lock);
5268 spin_unlock(&last_ptr->refill_lock);
5269
5270 last_ptr_loop = 1;
5271 search_start = block_group->key.objectid;
5272 /*
5273 * we know this block group is properly
5274 * in the list because
5275 * btrfs_remove_block_group, drops the
5276 * cluster before it removes the block
5277 * group from the list
5278 */
5279 goto have_block_group;
5280 } 5341 }
5281 spin_unlock(&last_ptr->lock);
5282refill_cluster: 5342refill_cluster:
5343 BUG_ON(used_block_group != block_group);
5344 /* If we are on LOOP_NO_EMPTY_SIZE, we can't
 5345 * set up a new cluster, so let's just skip it
5346 * and let the allocator find whatever block
5347 * it can find. If we reach this point, we
5348 * will have tried the cluster allocator
5349 * plenty of times and not have found
5350 * anything, so we are likely way too
5351 * fragmented for the clustering stuff to find
5352 * anything. */
5353 if (loop >= LOOP_NO_EMPTY_SIZE) {
5354 spin_unlock(&last_ptr->refill_lock);
5355 goto unclustered_alloc;
5356 }
5357
5283 /* 5358 /*
5284 * this cluster didn't work out, free it and 5359 * this cluster didn't work out, free it and
5285 * start over 5360 * start over
5286 */ 5361 */
5287 btrfs_return_cluster_to_free_space(NULL, last_ptr); 5362 btrfs_return_cluster_to_free_space(NULL, last_ptr);
5288 5363
5289 last_ptr_loop = 0;
5290
5291 /* allocate a cluster in this block group */ 5364 /* allocate a cluster in this block group */
5292 ret = btrfs_find_space_cluster(trans, root, 5365 ret = btrfs_find_space_cluster(trans, root,
5293 block_group, last_ptr, 5366 block_group, last_ptr,
5294 offset, num_bytes, 5367 search_start, num_bytes,
5295 empty_cluster + empty_size); 5368 empty_cluster + empty_size);
5296 if (ret == 0) { 5369 if (ret == 0) {
5297 /* 5370 /*
@@ -5327,6 +5400,7 @@ refill_cluster:
5327 goto loop; 5400 goto loop;
5328 } 5401 }
5329 5402
5403unclustered_alloc:
5330 offset = btrfs_find_space_for_alloc(block_group, search_start, 5404 offset = btrfs_find_space_for_alloc(block_group, search_start,
5331 num_bytes, empty_size); 5405 num_bytes, empty_size);
5332 /* 5406 /*
@@ -5353,14 +5427,14 @@ checks:
5353 search_start = stripe_align(root, offset); 5427 search_start = stripe_align(root, offset);
5354 /* move on to the next group */ 5428 /* move on to the next group */
5355 if (search_start + num_bytes >= search_end) { 5429 if (search_start + num_bytes >= search_end) {
5356 btrfs_add_free_space(block_group, offset, num_bytes); 5430 btrfs_add_free_space(used_block_group, offset, num_bytes);
5357 goto loop; 5431 goto loop;
5358 } 5432 }
5359 5433
5360 /* move on to the next group */ 5434 /* move on to the next group */
5361 if (search_start + num_bytes > 5435 if (search_start + num_bytes >
5362 block_group->key.objectid + block_group->key.offset) { 5436 used_block_group->key.objectid + used_block_group->key.offset) {
5363 btrfs_add_free_space(block_group, offset, num_bytes); 5437 btrfs_add_free_space(used_block_group, offset, num_bytes);
5364 goto loop; 5438 goto loop;
5365 } 5439 }
5366 5440
@@ -5368,14 +5442,14 @@ checks:
5368 ins->offset = num_bytes; 5442 ins->offset = num_bytes;
5369 5443
5370 if (offset < search_start) 5444 if (offset < search_start)
5371 btrfs_add_free_space(block_group, offset, 5445 btrfs_add_free_space(used_block_group, offset,
5372 search_start - offset); 5446 search_start - offset);
5373 BUG_ON(offset > search_start); 5447 BUG_ON(offset > search_start);
5374 5448
5375 ret = btrfs_update_reserved_bytes(block_group, num_bytes, 5449 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
5376 alloc_type); 5450 alloc_type);
5377 if (ret == -EAGAIN) { 5451 if (ret == -EAGAIN) {
5378 btrfs_add_free_space(block_group, offset, num_bytes); 5452 btrfs_add_free_space(used_block_group, offset, num_bytes);
5379 goto loop; 5453 goto loop;
5380 } 5454 }
5381 5455
@@ -5384,15 +5458,19 @@ checks:
5384 ins->offset = num_bytes; 5458 ins->offset = num_bytes;
5385 5459
5386 if (offset < search_start) 5460 if (offset < search_start)
5387 btrfs_add_free_space(block_group, offset, 5461 btrfs_add_free_space(used_block_group, offset,
5388 search_start - offset); 5462 search_start - offset);
5389 BUG_ON(offset > search_start); 5463 BUG_ON(offset > search_start);
5464 if (used_block_group != block_group)
5465 btrfs_put_block_group(used_block_group);
5390 btrfs_put_block_group(block_group); 5466 btrfs_put_block_group(block_group);
5391 break; 5467 break;
5392loop: 5468loop:
5393 failed_cluster_refill = false; 5469 failed_cluster_refill = false;
5394 failed_alloc = false; 5470 failed_alloc = false;
5395 BUG_ON(index != get_block_group_index(block_group)); 5471 BUG_ON(index != get_block_group_index(block_group));
5472 if (used_block_group != block_group)
5473 btrfs_put_block_group(used_block_group);
5396 btrfs_put_block_group(block_group); 5474 btrfs_put_block_group(block_group);
5397 } 5475 }
5398 up_read(&space_info->groups_sem); 5476 up_read(&space_info->groups_sem);
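
The used_block_group changes boil down to a reference-counting discipline: when the cluster hands us a different block group than the one we are iterating, take an extra reference on the borrowed group and make sure every exit path drops it before dropping the original. A toy version with a bare integer refcount follows; struct group, get_group() and put_group() are invented for the example.

#include <stdio.h>

struct group { int refs; const char *name; };

static void get_group(struct group *g) { g->refs++; }
static void put_group(struct group *g) { g->refs--; }

static void search(struct group *bg, struct group *cluster_bg)
{
	struct group *used = bg;

	get_group(bg);
	if (cluster_bg && cluster_bg != bg) {
		used = cluster_bg;
		get_group(used);	/* extra ref on the borrowed group */
	}

	/* ... allocation attempt using "used" would go here ... */

	if (used != bg)
		put_group(used);	/* drop the borrowed ref first */
	put_group(bg);
}

int main(void)
{
	struct group a = { 1, "a" }, b = { 1, "b" };

	search(&a, &b);
	printf("%s refs=%d, %s refs=%d\n", a.name, a.refs, b.name, b.refs);
	return 0;
}
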
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1f87c4d0e7a0..49f3c9dc09f4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -935,8 +935,10 @@ again:
935 node = tree_search(tree, start); 935 node = tree_search(tree, start);
936 if (!node) { 936 if (!node) {
937 prealloc = alloc_extent_state_atomic(prealloc); 937 prealloc = alloc_extent_state_atomic(prealloc);
938 if (!prealloc) 938 if (!prealloc) {
939 return -ENOMEM; 939 err = -ENOMEM;
940 goto out;
941 }
940 err = insert_state(tree, prealloc, start, end, &bits); 942 err = insert_state(tree, prealloc, start, end, &bits);
941 prealloc = NULL; 943 prealloc = NULL;
942 BUG_ON(err == -EEXIST); 944 BUG_ON(err == -EEXIST);
@@ -992,8 +994,10 @@ hit_next:
992 */ 994 */
993 if (state->start < start) { 995 if (state->start < start) {
994 prealloc = alloc_extent_state_atomic(prealloc); 996 prealloc = alloc_extent_state_atomic(prealloc);
995 if (!prealloc) 997 if (!prealloc) {
996 return -ENOMEM; 998 err = -ENOMEM;
999 goto out;
1000 }
997 err = split_state(tree, state, prealloc, start); 1001 err = split_state(tree, state, prealloc, start);
998 BUG_ON(err == -EEXIST); 1002 BUG_ON(err == -EEXIST);
999 prealloc = NULL; 1003 prealloc = NULL;
@@ -1024,8 +1028,10 @@ hit_next:
1024 this_end = last_start - 1; 1028 this_end = last_start - 1;
1025 1029
1026 prealloc = alloc_extent_state_atomic(prealloc); 1030 prealloc = alloc_extent_state_atomic(prealloc);
1027 if (!prealloc) 1031 if (!prealloc) {
1028 return -ENOMEM; 1032 err = -ENOMEM;
1033 goto out;
1034 }
1029 1035
1030 /* 1036 /*
1031 * Avoid to free 'prealloc' if it can be merged with 1037 * Avoid to free 'prealloc' if it can be merged with
@@ -1051,8 +1057,10 @@ hit_next:
1051 */ 1057 */
1052 if (state->start <= end && state->end > end) { 1058 if (state->start <= end && state->end > end) {
1053 prealloc = alloc_extent_state_atomic(prealloc); 1059 prealloc = alloc_extent_state_atomic(prealloc);
1054 if (!prealloc) 1060 if (!prealloc) {
1055 return -ENOMEM; 1061 err = -ENOMEM;
1062 goto out;
1063 }
1056 1064
1057 err = split_state(tree, state, prealloc, end + 1); 1065 err = split_state(tree, state, prealloc, end + 1);
1058 BUG_ON(err == -EEXIST); 1066 BUG_ON(err == -EEXIST);
@@ -2285,16 +2293,22 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2285 clean_io_failure(start, page); 2293 clean_io_failure(start, page);
2286 } 2294 }
2287 if (!uptodate) { 2295 if (!uptodate) {
2288 u64 failed_mirror; 2296 int failed_mirror;
2289 failed_mirror = (u64)bio->bi_bdev; 2297 failed_mirror = (int)(unsigned long)bio->bi_bdev;
2290 if (tree->ops && tree->ops->readpage_io_failed_hook) 2298 /*
2291 ret = tree->ops->readpage_io_failed_hook( 2299 * The generic bio_readpage_error handles errors the
2292 bio, page, start, end, 2300 * following way: If possible, new read requests are
2293 failed_mirror, state); 2301 * created and submitted and will end up in
2294 else 2302 * end_bio_extent_readpage as well (if we're lucky, not
2295 ret = bio_readpage_error(bio, page, start, end, 2303 * in the !uptodate case). In that case it returns 0 and
2296 failed_mirror, NULL); 2304 * we just go on with the next page in our bio. If it
2305 * can't handle the error it will return -EIO and we
2306 * remain responsible for that page.
2307 */
2308 ret = bio_readpage_error(bio, page, start, end,
2309 failed_mirror, NULL);
2297 if (ret == 0) { 2310 if (ret == 0) {
2311error_handled:
2298 uptodate = 2312 uptodate =
2299 test_bit(BIO_UPTODATE, &bio->bi_flags); 2313 test_bit(BIO_UPTODATE, &bio->bi_flags);
2300 if (err) 2314 if (err)
@@ -2302,6 +2316,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2302 uncache_state(&cached); 2316 uncache_state(&cached);
2303 continue; 2317 continue;
2304 } 2318 }
2319 if (tree->ops && tree->ops->readpage_io_failed_hook) {
2320 ret = tree->ops->readpage_io_failed_hook(
2321 bio, page, start, end,
2322 failed_mirror, state);
2323 if (ret == 0)
2324 goto error_handled;
2325 }
2305 } 2326 }
2306 2327
2307 if (uptodate) { 2328 if (uptodate) {
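
The reordering above means the generic repair path (bio_readpage_error) always gets the first shot and the per-tree readpage_io_failed_hook is only consulted if that fails; a return of 0 from either means the page has been taken care of. The stub-based sketch below shows only that control flow; the handlers and the int page argument are placeholders, not the extent_io machinery.

#include <errno.h>
#include <stdio.h>

typedef int (*failed_hook_t)(int page);

/* stand-in for bio_readpage_error(): pretend it could not resubmit */
static int generic_repair(int page)   { (void)page; return -EIO; }
/* stand-in for readpage_io_failed_hook(): pretend it handled the page */
static int tree_failed_hook(int page) { (void)page; return 0; }

static int handle_read_error(int page, failed_hook_t hook)
{
	int ret = generic_repair(page);

	if (ret == 0)
		return 0;		/* a new read was submitted for us */
	if (hook && hook(page) == 0)
		return 0;		/* the hook dealt with the page */
	return ret;			/* still our problem, report it */
}

int main(void)
{
	printf("ret = %d\n", handle_read_error(1, tree_failed_hook));
	return 0;
}
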
@@ -3366,6 +3387,9 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3366 return -ENOMEM; 3387 return -ENOMEM;
3367 path->leave_spinning = 1; 3388 path->leave_spinning = 1;
3368 3389
3390 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3391 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3392
3369 /* 3393 /*
3370 * lookup the last file extent. We're not using i_size here 3394 * lookup the last file extent. We're not using i_size here
3371 * because there might be preallocation past i_size 3395 * because there might be preallocation past i_size
@@ -3413,7 +3437,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3413 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, 3437 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3414 &cached_state, GFP_NOFS); 3438 &cached_state, GFP_NOFS);
3415 3439
3416 em = get_extent_skip_holes(inode, off, last_for_get_extent, 3440 em = get_extent_skip_holes(inode, start, last_for_get_extent,
3417 get_extent); 3441 get_extent);
3418 if (!em) 3442 if (!em)
3419 goto out; 3443 goto out;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index feb9be0e23bc..7604c3001322 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -70,7 +70,7 @@ struct extent_io_ops {
70 unsigned long bio_flags); 70 unsigned long bio_flags);
71 int (*readpage_io_hook)(struct page *page, u64 start, u64 end); 71 int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
72 int (*readpage_io_failed_hook)(struct bio *bio, struct page *page, 72 int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
73 u64 start, u64 end, u64 failed_mirror, 73 u64 start, u64 end, int failed_mirror,
74 struct extent_state *state); 74 struct extent_state *state);
75 int (*writepage_io_failed_hook)(struct bio *bio, struct page *page, 75 int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
76 u64 start, u64 end, 76 u64 start, u64 end,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index dafdfa059bf6..97fbe939c050 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1167,6 +1167,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1167 nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) / 1167 nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
1168 PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / 1168 PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
1169 (sizeof(struct page *))); 1169 (sizeof(struct page *)));
1170 nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1171 nrptrs = max(nrptrs, 8);
1170 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); 1172 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
1171 if (!pages) 1173 if (!pages)
1172 return -ENOMEM; 1174 return -ENOMEM;
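The new nrptrs clamp above keeps a buffered write from grabbing more pages per iteration than the task's remaining dirty-page budget (nr_dirtied_pause - nr_dirtied), while the max() keeps a floor of 8 so small budgets still make progress. A toy version of the same clamp with made-up numbers:

#include <stdio.h>

static long clamp_batch(long want, long dirtied_pause, long dirtied)
{
	long n = want;
	long budget = dirtied_pause - dirtied;

	if (n > budget)
		n = budget;	/* min(nrptrs, remaining dirty budget) */
	if (n < 8)
		n = 8;		/* max(nrptrs, 8) */
	return n;
}

int main(void)
{
	printf("%ld\n", clamp_batch(1024, 32, 10));	/* 22: limited by the budget */
	printf("%ld\n", clamp_batch(1024, 12, 10));	/* 8: the floor applies */
	return 0;
}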
@@ -1387,7 +1389,11 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1387 goto out; 1389 goto out;
1388 } 1390 }
1389 1391
1390 file_update_time(file); 1392 err = btrfs_update_time(file);
1393 if (err) {
1394 mutex_unlock(&inode->i_mutex);
1395 goto out;
1396 }
1391 BTRFS_I(inode)->sequence++; 1397 BTRFS_I(inode)->sequence++;
1392 1398
1393 start_pos = round_down(pos, root->sectorsize); 1399 start_pos = round_down(pos, root->sectorsize);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 7a15fcfb3e1f..ec23d43d0c35 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -351,6 +351,11 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
351 } 351 }
352 } 352 }
353 353
354 for (i = 0; i < io_ctl->num_pages; i++) {
355 clear_page_dirty_for_io(io_ctl->pages[i]);
356 set_page_extent_mapped(io_ctl->pages[i]);
357 }
358
354 return 0; 359 return 0;
355} 360}
356 361
@@ -537,6 +542,13 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
537 struct btrfs_free_space *entry, u8 *type) 542 struct btrfs_free_space *entry, u8 *type)
538{ 543{
539 struct btrfs_free_space_entry *e; 544 struct btrfs_free_space_entry *e;
545 int ret;
546
547 if (!io_ctl->cur) {
548 ret = io_ctl_check_crc(io_ctl, io_ctl->index);
549 if (ret)
550 return ret;
551 }
540 552
541 e = io_ctl->cur; 553 e = io_ctl->cur;
542 entry->offset = le64_to_cpu(e->offset); 554 entry->offset = le64_to_cpu(e->offset);
@@ -550,10 +562,7 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
550 562
551 io_ctl_unmap_page(io_ctl); 563 io_ctl_unmap_page(io_ctl);
552 564
553 if (io_ctl->index >= io_ctl->num_pages) 565 return 0;
554 return 0;
555
556 return io_ctl_check_crc(io_ctl, io_ctl->index);
557} 566}
558 567
559static int io_ctl_read_bitmap(struct io_ctl *io_ctl, 568static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
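The io_ctl changes above defer the per-page CRC check: instead of verifying the next page as soon as the previous one is exhausted, io_ctl_read_entry() now checks a page the first time an entry is actually read from it (when io_ctl->cur is empty). A toy model of that lazy verification, with fabricated data and checksums:

#include <stdio.h>

#define ENTRIES_PER_PAGE 2
#define NUM_PAGES 2

struct reader {
	int page;	/* current page index */
	int cur;	/* entries still mapped from the current page, 0 = none */
};

static int check_crc(int page)
{
	printf("checking crc of page %d\n", page);
	return 0;	/* pretend every page is intact */
}

static int read_entry(struct reader *r, int *entry)
{
	int ret;

	if (!r->cur) {			/* crossing onto a new page */
		ret = check_crc(r->page);
		if (ret)
			return ret;
		r->cur = ENTRIES_PER_PAGE;
	}
	*entry = r->page * 100 + (ENTRIES_PER_PAGE - r->cur);
	if (--r->cur == 0)
		r->page++;		/* the next read will verify the next page */
	return 0;
}

int main(void)
{
	struct reader r = { 0, 0 };
	int i, e;

	for (i = 0; i < ENTRIES_PER_PAGE * NUM_PAGES; i++) {
		if (read_entry(&r, &e) == 0)
			printf("entry %d\n", e);
	}
	return 0;
}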
@@ -561,9 +570,6 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
561{ 570{
562 int ret; 571 int ret;
563 572
564 if (io_ctl->cur && io_ctl->cur != io_ctl->orig)
565 io_ctl_unmap_page(io_ctl);
566
567 ret = io_ctl_check_crc(io_ctl, io_ctl->index); 573 ret = io_ctl_check_crc(io_ctl, io_ctl->index);
568 if (ret) 574 if (ret)
569 return ret; 575 return ret;
@@ -699,6 +705,8 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
699 num_entries--; 705 num_entries--;
700 } 706 }
701 707
708 io_ctl_unmap_page(&io_ctl);
709
702 /* 710 /*
703 * We add the bitmaps at the end of the entries in order that 711 * We add the bitmaps at the end of the entries in order that
704 * the bitmap entries are added to the cache. 712 * the bitmap entries are added to the cache.
@@ -1462,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
1462{ 1470{
1463 info->offset = offset_to_bitmap(ctl, offset); 1471 info->offset = offset_to_bitmap(ctl, offset);
1464 info->bytes = 0; 1472 info->bytes = 0;
1473 INIT_LIST_HEAD(&info->list);
1465 link_free_space(ctl, info); 1474 link_free_space(ctl, info);
1466 ctl->total_bitmaps++; 1475 ctl->total_bitmaps++;
1467 1476
@@ -1841,7 +1850,13 @@ again:
1841 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1850 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1842 1, 0); 1851 1, 0);
1843 if (!info) { 1852 if (!info) {
1844 WARN_ON(1); 1853 /* the tree logging code might be calling us before we
1854 * have fully loaded the free space rbtree for this
1855 * block group. So it is possible the entry won't
1856 * be in the rbtree yet at all. The caching code
1857 * will make sure not to put it in the rbtree if
1858 * the logging code has pinned it.
1859 */
1845 goto out_lock; 1860 goto out_lock;
1846 } 1861 }
1847 } 1862 }
@@ -2305,6 +2320,7 @@ again:
2305 2320
2306 if (!found) { 2321 if (!found) {
2307 start = i; 2322 start = i;
2323 cluster->max_size = 0;
2308 found = true; 2324 found = true;
2309 } 2325 }
2310 2326
@@ -2448,16 +2464,23 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2448{ 2464{
2449 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2465 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2450 struct btrfs_free_space *entry; 2466 struct btrfs_free_space *entry;
2451 struct rb_node *node;
2452 int ret = -ENOSPC; 2467 int ret = -ENOSPC;
2468 u64 bitmap_offset = offset_to_bitmap(ctl, offset);
2453 2469
2454 if (ctl->total_bitmaps == 0) 2470 if (ctl->total_bitmaps == 0)
2455 return -ENOSPC; 2471 return -ENOSPC;
2456 2472
2457 /* 2473 /*
2458 * First check our cached list of bitmaps and see if there is an entry 2474 * The bitmap that covers offset won't be in the list unless offset
2459 * here that will work. 2475 * is just its start offset.
2460 */ 2476 */
2477 entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
2478 if (entry->offset != bitmap_offset) {
2479 entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
2480 if (entry && list_empty(&entry->list))
2481 list_add(&entry->list, bitmaps);
2482 }
2483
2461 list_for_each_entry(entry, bitmaps, list) { 2484 list_for_each_entry(entry, bitmaps, list) {
2462 if (entry->bytes < min_bytes) 2485 if (entry->bytes < min_bytes)
2463 continue; 2486 continue;
@@ -2468,38 +2491,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2468 } 2491 }
2469 2492
2470 /* 2493 /*
2471 * If we do have entries on our list and we are here then we didn't find 2494 * The bitmaps list has all the bitmaps that record free space
2472 * anything, so go ahead and get the next entry after the last entry in 2495 * starting after offset, so no more search is required.
2473 * this list and start the search from there.
2474 */ 2496 */
2475 if (!list_empty(bitmaps)) { 2497 return -ENOSPC;
2476 entry = list_entry(bitmaps->prev, struct btrfs_free_space,
2477 list);
2478 node = rb_next(&entry->offset_index);
2479 if (!node)
2480 return -ENOSPC;
2481 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2482 goto search;
2483 }
2484
2485 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
2486 if (!entry)
2487 return -ENOSPC;
2488
2489search:
2490 node = &entry->offset_index;
2491 do {
2492 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2493 node = rb_next(&entry->offset_index);
2494 if (!entry->bitmap)
2495 continue;
2496 if (entry->bytes < min_bytes)
2497 continue;
2498 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2499 bytes, min_bytes);
2500 } while (ret && node);
2501
2502 return ret;
2503} 2498}
2504 2499
2505/* 2500/*
@@ -2517,8 +2512,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2517 u64 offset, u64 bytes, u64 empty_size) 2512 u64 offset, u64 bytes, u64 empty_size)
2518{ 2513{
2519 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2514 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2520 struct list_head bitmaps;
2521 struct btrfs_free_space *entry, *tmp; 2515 struct btrfs_free_space *entry, *tmp;
2516 LIST_HEAD(bitmaps);
2522 u64 min_bytes; 2517 u64 min_bytes;
2523 int ret; 2518 int ret;
2524 2519
@@ -2557,7 +2552,6 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2557 goto out; 2552 goto out;
2558 } 2553 }
2559 2554
2560 INIT_LIST_HEAD(&bitmaps);
2561 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, 2555 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
2562 bytes, min_bytes); 2556 bytes, min_bytes);
2563 if (ret) 2557 if (ret)
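The btrfs_find_space_cluster hunks above replace a separate struct list_head declaration plus a later INIT_LIST_HEAD() call with a single LIST_HEAD() declaration. A self-contained userspace mimic of the two styles; the macros are re-implemented here only so the demo compiles on its own:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

int main(void)
{
	/* old style: declare, then initialise later */
	struct list_head a;
	INIT_LIST_HEAD(&a);

	/* new style: one declaration, already a valid empty list */
	LIST_HEAD(b);

	printf("a empty: %d, b empty: %d\n", a.next == &a, b.next == &b);
	return 0;
}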
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 53dcbdf446cd..f8962a957d65 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -398,6 +398,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
398 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; 398 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
399 struct btrfs_path *path; 399 struct btrfs_path *path;
400 struct inode *inode; 400 struct inode *inode;
401 struct btrfs_block_rsv *rsv;
402 u64 num_bytes;
401 u64 alloc_hint = 0; 403 u64 alloc_hint = 0;
402 int ret; 404 int ret;
403 int prealloc; 405 int prealloc;
@@ -421,11 +423,26 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
421 if (!path) 423 if (!path)
422 return -ENOMEM; 424 return -ENOMEM;
423 425
426 rsv = trans->block_rsv;
427 trans->block_rsv = &root->fs_info->trans_block_rsv;
428
429 num_bytes = trans->bytes_reserved;
430 /*
431 * 1 item for inode item insertion if need
432 * 3 items for inode item update (in the worst case)
433 * 1 item for free space object
434 * 3 items for pre-allocation
435 */
436 trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
437 ret = btrfs_block_rsv_add_noflush(root, trans->block_rsv,
438 trans->bytes_reserved);
439 if (ret)
440 goto out;
424again: 441again:
425 inode = lookup_free_ino_inode(root, path); 442 inode = lookup_free_ino_inode(root, path);
426 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 443 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
427 ret = PTR_ERR(inode); 444 ret = PTR_ERR(inode);
428 goto out; 445 goto out_release;
429 } 446 }
430 447
431 if (IS_ERR(inode)) { 448 if (IS_ERR(inode)) {
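The reservation comment above counts 1 + 3 + 1 + 3 = 8 worst-case metadata items before asking btrfs_calc_trans_metadata_size() for the byte amount to reserve. A back-of-the-envelope version of that sizing; the per-item byte cost below is purely an assumption for illustration, not the kernel's actual formula:

#include <stdio.h>

int main(void)
{
	unsigned items = 1	/* inode item insertion, if needed */
		       + 3	/* inode item update, worst case */
		       + 1	/* free space object */
		       + 3;	/* pre-allocation */
	unsigned long long per_item_bytes = 3ULL * 4096;	/* assumed cost per item */

	printf("items = %u, reserved = %llu bytes\n",
	       items, items * per_item_bytes);
	return 0;
}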
@@ -434,7 +451,7 @@ again:
434 451
435 ret = create_free_ino_inode(root, trans, path); 452 ret = create_free_ino_inode(root, trans, path);
436 if (ret) 453 if (ret)
437 goto out; 454 goto out_release;
438 goto again; 455 goto again;
439 } 456 }
440 457
@@ -477,11 +494,14 @@ again:
477 } 494 }
478 btrfs_free_reserved_data_space(inode, prealloc); 495 btrfs_free_reserved_data_space(inode, prealloc);
479 496
497 ret = btrfs_write_out_ino_cache(root, trans, path);
480out_put: 498out_put:
481 iput(inode); 499 iput(inode);
500out_release:
501 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
482out: 502out:
483 if (ret == 0) 503 trans->block_rsv = rsv;
484 ret = btrfs_write_out_ino_cache(root, trans, path); 504 trans->bytes_reserved = num_bytes;
485 505
486 btrfs_free_path(path); 506 btrfs_free_path(path);
487 return ret; 507 return ret;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 966ddcc4c63d..0a6b928813a4 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -38,6 +38,7 @@
38#include <linux/falloc.h> 38#include <linux/falloc.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/ratelimit.h> 40#include <linux/ratelimit.h>
41#include <linux/mount.h>
41#include "compat.h" 42#include "compat.h"
42#include "ctree.h" 43#include "ctree.h"
43#include "disk-io.h" 44#include "disk-io.h"
@@ -93,6 +94,8 @@ static noinline int cow_file_range(struct inode *inode,
93 struct page *locked_page, 94 struct page *locked_page,
94 u64 start, u64 end, int *page_started, 95 u64 start, u64 end, int *page_started,
95 unsigned long *nr_written, int unlock); 96 unsigned long *nr_written, int unlock);
97static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
98 struct btrfs_root *root, struct inode *inode);
96 99
97static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 100static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
98 struct inode *inode, struct inode *dir, 101 struct inode *inode, struct inode *dir,
@@ -1741,7 +1744,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1741 trans = btrfs_join_transaction(root); 1744 trans = btrfs_join_transaction(root);
1742 BUG_ON(IS_ERR(trans)); 1745 BUG_ON(IS_ERR(trans));
1743 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1746 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1744 ret = btrfs_update_inode(trans, root, inode); 1747 ret = btrfs_update_inode_fallback(trans, root, inode);
1745 BUG_ON(ret); 1748 BUG_ON(ret);
1746 } 1749 }
1747 goto out; 1750 goto out;
@@ -1791,7 +1794,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1791 1794
1792 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1795 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1793 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 1796 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1794 ret = btrfs_update_inode(trans, root, inode); 1797 ret = btrfs_update_inode_fallback(trans, root, inode);
1795 BUG_ON(ret); 1798 BUG_ON(ret);
1796 } 1799 }
1797 ret = 0; 1800 ret = 0;
@@ -2029,7 +2032,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2029 /* insert an orphan item to track this unlinked/truncated file */ 2032 /* insert an orphan item to track this unlinked/truncated file */
2030 if (insert >= 1) { 2033 if (insert >= 1) {
2031 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); 2034 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2032 BUG_ON(ret); 2035 BUG_ON(ret && ret != -EEXIST);
2033 } 2036 }
2034 2037
2035 /* insert an orphan item to track subvolume contains orphan files */ 2038 /* insert an orphan item to track subvolume contains orphan files */
@@ -2156,6 +2159,38 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2156 if (ret && ret != -ESTALE) 2159 if (ret && ret != -ESTALE)
2157 goto out; 2160 goto out;
2158 2161
2162 if (ret == -ESTALE && root == root->fs_info->tree_root) {
2163 struct btrfs_root *dead_root;
2164 struct btrfs_fs_info *fs_info = root->fs_info;
2165 int is_dead_root = 0;
2166
2167 /*
2168 * this is an orphan in the tree root. Currently these
2169 * could come from 2 sources:
2170 * a) a snapshot deletion in progress
2171 * b) a free space cache inode
2172 * We need to distinguish those two, as the snapshot
2173 * orphan must not get deleted.
2174 * find_dead_roots already ran before us, so if this
2175 * is a snapshot deletion, we should find the root
2176 * in the dead_roots list
2177 */
2178 spin_lock(&fs_info->trans_lock);
2179 list_for_each_entry(dead_root, &fs_info->dead_roots,
2180 root_list) {
2181 if (dead_root->root_key.objectid ==
2182 found_key.objectid) {
2183 is_dead_root = 1;
2184 break;
2185 }
2186 }
2187 spin_unlock(&fs_info->trans_lock);
2188 if (is_dead_root) {
2189 /* prevent this orphan from being found again */
2190 key.offset = found_key.objectid - 1;
2191 continue;
2192 }
2193 }
2159 /* 2194 /*
2160 * Inode is already gone but the orphan item is still there, 2195 * Inode is already gone but the orphan item is still there,
2161 * kill the orphan item. 2196 * kill the orphan item.
@@ -2189,7 +2224,14 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2189 continue; 2224 continue;
2190 } 2225 }
2191 nr_truncate++; 2226 nr_truncate++;
2227 /*
2228 * Need to hold the imutex for reservation purposes, not
2229 * a huge deal here but I have a WARN_ON in
2230 * btrfs_delalloc_reserve_space to catch offenders.
2231 */
2232 mutex_lock(&inode->i_mutex);
2192 ret = btrfs_truncate(inode); 2233 ret = btrfs_truncate(inode);
2234 mutex_unlock(&inode->i_mutex);
2193 } else { 2235 } else {
2194 nr_unlink++; 2236 nr_unlink++;
2195 } 2237 }
@@ -2199,6 +2241,9 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2199 if (ret) 2241 if (ret)
2200 goto out; 2242 goto out;
2201 } 2243 }
2244 /* release the path since we're done with it */
2245 btrfs_release_path(path);
2246
2202 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; 2247 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2203 2248
2204 if (root->orphan_block_rsv) 2249 if (root->orphan_block_rsv)
@@ -2426,7 +2471,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
2426/* 2471/*
2427 * copy everything in the in-memory inode into the btree. 2472 * copy everything in the in-memory inode into the btree.
2428 */ 2473 */
2429noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 2474static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
2430 struct btrfs_root *root, struct inode *inode) 2475 struct btrfs_root *root, struct inode *inode)
2431{ 2476{
2432 struct btrfs_inode_item *inode_item; 2477 struct btrfs_inode_item *inode_item;
@@ -2434,21 +2479,6 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2434 struct extent_buffer *leaf; 2479 struct extent_buffer *leaf;
2435 int ret; 2480 int ret;
2436 2481
2437 /*
2438 * If the inode is a free space inode, we can deadlock during commit
2439 * if we put it into the delayed code.
2440 *
2441 * The data relocation inode should also be directly updated
2442 * without delay
2443 */
2444 if (!btrfs_is_free_space_inode(root, inode)
2445 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
2446 ret = btrfs_delayed_update_inode(trans, root, inode);
2447 if (!ret)
2448 btrfs_set_inode_last_trans(trans, inode);
2449 return ret;
2450 }
2451
2452 path = btrfs_alloc_path(); 2482 path = btrfs_alloc_path();
2453 if (!path) 2483 if (!path)
2454 return -ENOMEM; 2484 return -ENOMEM;
@@ -2477,6 +2507,43 @@ failed:
2477} 2507}
2478 2508
2479/* 2509/*
2510 * copy everything in the in-memory inode into the btree.
2511 */
2512noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2513 struct btrfs_root *root, struct inode *inode)
2514{
2515 int ret;
2516
2517 /*
2518 * If the inode is a free space inode, we can deadlock during commit
2519 * if we put it into the delayed code.
2520 *
2521 * The data relocation inode should also be directly updated
2522 * without delay
2523 */
2524 if (!btrfs_is_free_space_inode(root, inode)
2525 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
2526 ret = btrfs_delayed_update_inode(trans, root, inode);
2527 if (!ret)
2528 btrfs_set_inode_last_trans(trans, inode);
2529 return ret;
2530 }
2531
2532 return btrfs_update_inode_item(trans, root, inode);
2533}
2534
2535static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
2536 struct btrfs_root *root, struct inode *inode)
2537{
2538 int ret;
2539
2540 ret = btrfs_update_inode(trans, root, inode);
2541 if (ret == -ENOSPC)
2542 return btrfs_update_inode_item(trans, root, inode);
2543 return ret;
2544}
2545
2546/*
2480 * unlink helper that gets used here in inode.c and in the tree logging 2547 * unlink helper that gets used here in inode.c and in the tree logging
2481 * recovery code. It removes a link in a directory with a given name, and 2548 * recovery code. It removes a link in a directory with a given name, and
2482 * also drops the back refs in the inode to the directory 2549 * also drops the back refs in the inode to the directory
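The split above leaves btrfs_update_inode() preferring the delayed (batched) update whenever the inode is eligible, and adds btrfs_update_inode_fallback(), which retries with the direct item update only when the delayed path reports -ENOSPC. A hedged sketch of that try-then-fall-back shape, using stand-in functions rather than the btrfs ones:

#include <errno.h>
#include <stdio.h>

static int delayed_update(int simulate_enospc)
{
	return simulate_enospc ? -ENOSPC : 0;
}

static int direct_item_update(void)
{
	return 0;	/* pretend the in-place btree update always succeeds */
}

static int update_inode(int simulate_enospc)
{
	/* fast path: queue the update for later batching */
	return delayed_update(simulate_enospc);
}

static int update_inode_fallback(int simulate_enospc)
{
	int ret = update_inode(simulate_enospc);

	/* only -ENOSPC falls back; any other error is returned as-is */
	if (ret == -ENOSPC)
		return direct_item_update();
	return ret;
}

int main(void)
{
	printf("no pressure: %d\n", update_inode_fallback(0));
	printf("enospc path: %d\n", update_inode_fallback(1));
	return 0;
}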
@@ -3300,7 +3367,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3300 u64 hint_byte = 0; 3367 u64 hint_byte = 0;
3301 hole_size = last_byte - cur_offset; 3368 hole_size = last_byte - cur_offset;
3302 3369
3303 trans = btrfs_start_transaction(root, 2); 3370 trans = btrfs_start_transaction(root, 3);
3304 if (IS_ERR(trans)) { 3371 if (IS_ERR(trans)) {
3305 err = PTR_ERR(trans); 3372 err = PTR_ERR(trans);
3306 break; 3373 break;
@@ -3310,6 +3377,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3310 cur_offset + hole_size, 3377 cur_offset + hole_size,
3311 &hint_byte, 1); 3378 &hint_byte, 1);
3312 if (err) { 3379 if (err) {
3380 btrfs_update_inode(trans, root, inode);
3313 btrfs_end_transaction(trans, root); 3381 btrfs_end_transaction(trans, root);
3314 break; 3382 break;
3315 } 3383 }
@@ -3319,6 +3387,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3319 0, hole_size, 0, hole_size, 3387 0, hole_size, 0, hole_size,
3320 0, 0, 0); 3388 0, 0, 0);
3321 if (err) { 3389 if (err) {
3390 btrfs_update_inode(trans, root, inode);
3322 btrfs_end_transaction(trans, root); 3391 btrfs_end_transaction(trans, root);
3323 break; 3392 break;
3324 } 3393 }
@@ -3326,6 +3395,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3326 btrfs_drop_extent_cache(inode, hole_start, 3395 btrfs_drop_extent_cache(inode, hole_start,
3327 last_byte - 1, 0); 3396 last_byte - 1, 0);
3328 3397
3398 btrfs_update_inode(trans, root, inode);
3329 btrfs_end_transaction(trans, root); 3399 btrfs_end_transaction(trans, root);
3330 } 3400 }
3331 free_extent_map(em); 3401 free_extent_map(em);
@@ -3343,6 +3413,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3343 3413
3344static int btrfs_setsize(struct inode *inode, loff_t newsize) 3414static int btrfs_setsize(struct inode *inode, loff_t newsize)
3345{ 3415{
3416 struct btrfs_root *root = BTRFS_I(inode)->root;
3417 struct btrfs_trans_handle *trans;
3346 loff_t oldsize = i_size_read(inode); 3418 loff_t oldsize = i_size_read(inode);
3347 int ret; 3419 int ret;
3348 3420
@@ -3350,16 +3422,19 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
3350 return 0; 3422 return 0;
3351 3423
3352 if (newsize > oldsize) { 3424 if (newsize > oldsize) {
3353 i_size_write(inode, newsize);
3354 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
3355 truncate_pagecache(inode, oldsize, newsize); 3425 truncate_pagecache(inode, oldsize, newsize);
3356 ret = btrfs_cont_expand(inode, oldsize, newsize); 3426 ret = btrfs_cont_expand(inode, oldsize, newsize);
3357 if (ret) { 3427 if (ret)
3358 btrfs_setsize(inode, oldsize);
3359 return ret; 3428 return ret;
3360 }
3361 3429
3362 mark_inode_dirty(inode); 3430 trans = btrfs_start_transaction(root, 1);
3431 if (IS_ERR(trans))
3432 return PTR_ERR(trans);
3433
3434 i_size_write(inode, newsize);
3435 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
3436 ret = btrfs_update_inode(trans, root, inode);
3437 btrfs_end_transaction_throttle(trans, root);
3363 } else { 3438 } else {
3364 3439
3365 /* 3440 /*
@@ -3399,9 +3474,9 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3399 3474
3400 if (attr->ia_valid) { 3475 if (attr->ia_valid) {
3401 setattr_copy(inode, attr); 3476 setattr_copy(inode, attr);
3402 mark_inode_dirty(inode); 3477 err = btrfs_dirty_inode(inode);
3403 3478
3404 if (attr->ia_valid & ATTR_MODE) 3479 if (!err && attr->ia_valid & ATTR_MODE)
3405 err = btrfs_acl_chmod(inode); 3480 err = btrfs_acl_chmod(inode);
3406 } 3481 }
3407 3482
@@ -3463,7 +3538,7 @@ void btrfs_evict_inode(struct inode *inode)
3463 * doing the truncate. 3538 * doing the truncate.
3464 */ 3539 */
3465 while (1) { 3540 while (1) {
3466 ret = btrfs_block_rsv_refill(root, rsv, min_size); 3541 ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
3467 3542
3468 /* 3543 /*
3469 * Try and steal from the global reserve since we will 3544 * Try and steal from the global reserve since we will
@@ -4177,42 +4252,80 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4177 * FIXME, needs more benchmarking...there are no reasons other than performance 4252 * FIXME, needs more benchmarking...there are no reasons other than performance
4178 * to keep or drop this code. 4253 * to keep or drop this code.
4179 */ 4254 */
4180void btrfs_dirty_inode(struct inode *inode, int flags) 4255int btrfs_dirty_inode(struct inode *inode)
4181{ 4256{
4182 struct btrfs_root *root = BTRFS_I(inode)->root; 4257 struct btrfs_root *root = BTRFS_I(inode)->root;
4183 struct btrfs_trans_handle *trans; 4258 struct btrfs_trans_handle *trans;
4184 int ret; 4259 int ret;
4185 4260
4186 if (BTRFS_I(inode)->dummy_inode) 4261 if (BTRFS_I(inode)->dummy_inode)
4187 return; 4262 return 0;
4188 4263
4189 trans = btrfs_join_transaction(root); 4264 trans = btrfs_join_transaction(root);
4190 BUG_ON(IS_ERR(trans)); 4265 if (IS_ERR(trans))
4266 return PTR_ERR(trans);
4191 4267
4192 ret = btrfs_update_inode(trans, root, inode); 4268 ret = btrfs_update_inode(trans, root, inode);
4193 if (ret && ret == -ENOSPC) { 4269 if (ret && ret == -ENOSPC) {
4194 /* whoops, lets try again with the full transaction */ 4270 /* whoops, lets try again with the full transaction */
4195 btrfs_end_transaction(trans, root); 4271 btrfs_end_transaction(trans, root);
4196 trans = btrfs_start_transaction(root, 1); 4272 trans = btrfs_start_transaction(root, 1);
4197 if (IS_ERR(trans)) { 4273 if (IS_ERR(trans))
4198 printk_ratelimited(KERN_ERR "btrfs: fail to " 4274 return PTR_ERR(trans);
4199 "dirty inode %llu error %ld\n",
4200 (unsigned long long)btrfs_ino(inode),
4201 PTR_ERR(trans));
4202 return;
4203 }
4204 4275
4205 ret = btrfs_update_inode(trans, root, inode); 4276 ret = btrfs_update_inode(trans, root, inode);
4206 if (ret) {
4207 printk_ratelimited(KERN_ERR "btrfs: fail to "
4208 "dirty inode %llu error %d\n",
4209 (unsigned long long)btrfs_ino(inode),
4210 ret);
4211 }
4212 } 4277 }
4213 btrfs_end_transaction(trans, root); 4278 btrfs_end_transaction(trans, root);
4214 if (BTRFS_I(inode)->delayed_node) 4279 if (BTRFS_I(inode)->delayed_node)
4215 btrfs_balance_delayed_items(root); 4280 btrfs_balance_delayed_items(root);
4281
4282 return ret;
4283}
4284
4285/*
4286 * This is a copy of file_update_time. We need this so we can return error on
4287 * ENOSPC for updating the inode in the case of file write and mmap writes.
4288 */
4289int btrfs_update_time(struct file *file)
4290{
4291 struct inode *inode = file->f_path.dentry->d_inode;
4292 struct timespec now;
4293 int ret;
4294 enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
4295
4296 /* First try to exhaust all avenues to not sync */
4297 if (IS_NOCMTIME(inode))
4298 return 0;
4299
4300 now = current_fs_time(inode->i_sb);
4301 if (!timespec_equal(&inode->i_mtime, &now))
4302 sync_it = S_MTIME;
4303
4304 if (!timespec_equal(&inode->i_ctime, &now))
4305 sync_it |= S_CTIME;
4306
4307 if (IS_I_VERSION(inode))
4308 sync_it |= S_VERSION;
4309
4310 if (!sync_it)
4311 return 0;
4312
4313 /* Finally allowed to write? Takes lock. */
4314 if (mnt_want_write_file(file))
4315 return 0;
4316
4317 /* Only change inode inside the lock region */
4318 if (sync_it & S_VERSION)
4319 inode_inc_iversion(inode);
4320 if (sync_it & S_CTIME)
4321 inode->i_ctime = now;
4322 if (sync_it & S_MTIME)
4323 inode->i_mtime = now;
4324 ret = btrfs_dirty_inode(inode);
4325 if (!ret)
4326 mark_inode_dirty_sync(inode);
4327 mnt_drop_write(file->f_path.mnt);
4328 return ret;
4216} 4329}
4217 4330
4218/* 4331/*
@@ -4528,11 +4641,18 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4528 goto out_unlock; 4641 goto out_unlock;
4529 } 4642 }
4530 4643
4644 /*
4645 * If the active LSM wants to access the inode during
4646 * d_instantiate it needs these. Smack checks to see
4647 * if the filesystem supports xattrs by looking at the
4648 * ops vector.
4649 */
4650
4651 inode->i_op = &btrfs_special_inode_operations;
4531 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4652 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4532 if (err) 4653 if (err)
4533 drop_inode = 1; 4654 drop_inode = 1;
4534 else { 4655 else {
4535 inode->i_op = &btrfs_special_inode_operations;
4536 init_special_inode(inode, inode->i_mode, rdev); 4656 init_special_inode(inode, inode->i_mode, rdev);
4537 btrfs_update_inode(trans, root, inode); 4657 btrfs_update_inode(trans, root, inode);
4538 } 4658 }
@@ -4586,14 +4706,21 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4586 goto out_unlock; 4706 goto out_unlock;
4587 } 4707 }
4588 4708
4709 /*
4710 * If the active LSM wants to access the inode during
4711 * d_instantiate it needs these. Smack checks to see
4712 * if the filesystem supports xattrs by looking at the
4713 * ops vector.
4714 */
4715 inode->i_fop = &btrfs_file_operations;
4716 inode->i_op = &btrfs_file_inode_operations;
4717
4589 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4718 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4590 if (err) 4719 if (err)
4591 drop_inode = 1; 4720 drop_inode = 1;
4592 else { 4721 else {
4593 inode->i_mapping->a_ops = &btrfs_aops; 4722 inode->i_mapping->a_ops = &btrfs_aops;
4594 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 4723 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4595 inode->i_fop = &btrfs_file_operations;
4596 inode->i_op = &btrfs_file_inode_operations;
4597 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 4724 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4598 } 4725 }
4599out_unlock: 4726out_unlock:
@@ -5632,7 +5759,7 @@ again:
5632 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { 5759 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
5633 ret = btrfs_ordered_update_i_size(inode, 0, ordered); 5760 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5634 if (!ret) 5761 if (!ret)
5635 err = btrfs_update_inode(trans, root, inode); 5762 err = btrfs_update_inode_fallback(trans, root, inode);
5636 goto out; 5763 goto out;
5637 } 5764 }
5638 5765
@@ -5670,7 +5797,7 @@ again:
5670 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); 5797 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
5671 ret = btrfs_ordered_update_i_size(inode, 0, ordered); 5798 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5672 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) 5799 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
5673 btrfs_update_inode(trans, root, inode); 5800 btrfs_update_inode_fallback(trans, root, inode);
5674 ret = 0; 5801 ret = 0;
5675out_unlock: 5802out_unlock:
5676 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, 5803 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
@@ -6276,7 +6403,12 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
6276 u64 page_start; 6403 u64 page_start;
6277 u64 page_end; 6404 u64 page_end;
6278 6405
6406 /* Need this to keep space reservations serialized */
6407 mutex_lock(&inode->i_mutex);
6279 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); 6408 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
6409 mutex_unlock(&inode->i_mutex);
6410 if (!ret)
6411 ret = btrfs_update_time(vma->vm_file);
6280 if (ret) { 6412 if (ret) {
6281 if (ret == -ENOMEM) 6413 if (ret == -ENOMEM)
6282 ret = VM_FAULT_OOM; 6414 ret = VM_FAULT_OOM;
@@ -6488,8 +6620,9 @@ static int btrfs_truncate(struct inode *inode)
6488 /* Just need the 1 for updating the inode */ 6620 /* Just need the 1 for updating the inode */
6489 trans = btrfs_start_transaction(root, 1); 6621 trans = btrfs_start_transaction(root, 1);
6490 if (IS_ERR(trans)) { 6622 if (IS_ERR(trans)) {
6491 err = PTR_ERR(trans); 6623 ret = err = PTR_ERR(trans);
6492 goto out; 6624 trans = NULL;
6625 break;
6493 } 6626 }
6494 } 6627 }
6495 6628
@@ -6529,14 +6662,16 @@ end_trans:
6529 ret = btrfs_orphan_del(NULL, inode); 6662 ret = btrfs_orphan_del(NULL, inode);
6530 } 6663 }
6531 6664
6532 trans->block_rsv = &root->fs_info->trans_block_rsv; 6665 if (trans) {
6533 ret = btrfs_update_inode(trans, root, inode); 6666 trans->block_rsv = &root->fs_info->trans_block_rsv;
6534 if (ret && !err) 6667 ret = btrfs_update_inode(trans, root, inode);
6535 err = ret; 6668 if (ret && !err)
6669 err = ret;
6536 6670
6537 nr = trans->blocks_used; 6671 nr = trans->blocks_used;
6538 ret = btrfs_end_transaction_throttle(trans, root); 6672 ret = btrfs_end_transaction_throttle(trans, root);
6539 btrfs_btree_balance_dirty(root, nr); 6673 btrfs_btree_balance_dirty(root, nr);
6674 }
6540 6675
6541out: 6676out:
6542 btrfs_free_block_rsv(root, rsv); 6677 btrfs_free_block_rsv(root, rsv);
@@ -6605,6 +6740,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
6605 ei->orphan_meta_reserved = 0; 6740 ei->orphan_meta_reserved = 0;
6606 ei->dummy_inode = 0; 6741 ei->dummy_inode = 0;
6607 ei->in_defrag = 0; 6742 ei->in_defrag = 0;
6743 ei->delalloc_meta_reserved = 0;
6608 ei->force_compress = BTRFS_COMPRESS_NONE; 6744 ei->force_compress = BTRFS_COMPRESS_NONE;
6609 6745
6610 ei->delayed_node = NULL; 6746 ei->delayed_node = NULL;
@@ -6764,11 +6900,13 @@ static int btrfs_getattr(struct vfsmount *mnt,
6764 struct dentry *dentry, struct kstat *stat) 6900 struct dentry *dentry, struct kstat *stat)
6765{ 6901{
6766 struct inode *inode = dentry->d_inode; 6902 struct inode *inode = dentry->d_inode;
6903 u32 blocksize = inode->i_sb->s_blocksize;
6904
6767 generic_fillattr(inode, stat); 6905 generic_fillattr(inode, stat);
6768 stat->dev = BTRFS_I(inode)->root->anon_dev; 6906 stat->dev = BTRFS_I(inode)->root->anon_dev;
6769 stat->blksize = PAGE_CACHE_SIZE; 6907 stat->blksize = PAGE_CACHE_SIZE;
6770 stat->blocks = (inode_get_bytes(inode) + 6908 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
6771 BTRFS_I(inode)->delalloc_bytes) >> 9; 6909 ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
6772 return 0; 6910 return 0;
6773} 6911}
6774 6912
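The getattr hunk above rounds both the allocated byte count and the outstanding delalloc bytes up to the filesystem block size before converting to 512-byte st_blocks units. A worked example with invented byte counts and an assumed 4096-byte block size:

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t blocksize = 4096;
	uint64_t inode_bytes = 6000;	/* allocated extents */
	uint64_t delalloc = 1000;	/* dirty data, not yet allocated */

	uint64_t blocks = (ALIGN_UP(inode_bytes, blocksize) +
			   ALIGN_UP(delalloc, blocksize)) >> 9;

	printf("st_blocks = %llu\n", (unsigned long long)blocks);	/* 24 */
	return 0;
}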
@@ -7044,14 +7182,21 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7044 goto out_unlock; 7182 goto out_unlock;
7045 } 7183 }
7046 7184
7185 /*
7186 * If the active LSM wants to access the inode during
7187 * d_instantiate it needs these. Smack checks to see
7188 * if the filesystem supports xattrs by looking at the
7189 * ops vector.
7190 */
7191 inode->i_fop = &btrfs_file_operations;
7192 inode->i_op = &btrfs_file_inode_operations;
7193
7047 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 7194 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
7048 if (err) 7195 if (err)
7049 drop_inode = 1; 7196 drop_inode = 1;
7050 else { 7197 else {
7051 inode->i_mapping->a_ops = &btrfs_aops; 7198 inode->i_mapping->a_ops = &btrfs_aops;
7052 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 7199 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
7053 inode->i_fop = &btrfs_file_operations;
7054 inode->i_op = &btrfs_file_inode_operations;
7055 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 7200 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
7056 } 7201 }
7057 if (drop_inode) 7202 if (drop_inode)
@@ -7321,6 +7466,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
7321 .follow_link = page_follow_link_light, 7466 .follow_link = page_follow_link_light,
7322 .put_link = page_put_link, 7467 .put_link = page_put_link,
7323 .getattr = btrfs_getattr, 7468 .getattr = btrfs_getattr,
7469 .setattr = btrfs_setattr,
7324 .permission = btrfs_permission, 7470 .permission = btrfs_permission,
7325 .setxattr = btrfs_setxattr, 7471 .setxattr = btrfs_setxattr,
7326 .getxattr = btrfs_getxattr, 7472 .getxattr = btrfs_getxattr,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4a34c472f126..c04f02c7d5bb 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -252,11 +252,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
252 trans = btrfs_join_transaction(root); 252 trans = btrfs_join_transaction(root);
253 BUG_ON(IS_ERR(trans)); 253 BUG_ON(IS_ERR(trans));
254 254
255 btrfs_update_iflags(inode);
256 inode->i_ctime = CURRENT_TIME;
255 ret = btrfs_update_inode(trans, root, inode); 257 ret = btrfs_update_inode(trans, root, inode);
256 BUG_ON(ret); 258 BUG_ON(ret);
257 259
258 btrfs_update_iflags(inode);
259 inode->i_ctime = CURRENT_TIME;
260 btrfs_end_transaction(trans, root); 260 btrfs_end_transaction(trans, root);
261 261
262 mnt_drop_write(file->f_path.mnt); 262 mnt_drop_write(file->f_path.mnt);
@@ -858,8 +858,10 @@ static int cluster_pages_for_defrag(struct inode *inode,
858 return 0; 858 return 0;
859 file_end = (isize - 1) >> PAGE_CACHE_SHIFT; 859 file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
860 860
861 mutex_lock(&inode->i_mutex);
861 ret = btrfs_delalloc_reserve_space(inode, 862 ret = btrfs_delalloc_reserve_space(inode,
862 num_pages << PAGE_CACHE_SHIFT); 863 num_pages << PAGE_CACHE_SHIFT);
864 mutex_unlock(&inode->i_mutex);
863 if (ret) 865 if (ret)
864 return ret; 866 return ret;
865again: 867again:
@@ -1216,12 +1218,12 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
1216 *devstr = '\0'; 1218 *devstr = '\0';
1217 devstr = vol_args->name; 1219 devstr = vol_args->name;
1218 devid = simple_strtoull(devstr, &end, 10); 1220 devid = simple_strtoull(devstr, &end, 10);
1219 printk(KERN_INFO "resizing devid %llu\n", 1221 printk(KERN_INFO "btrfs: resizing devid %llu\n",
1220 (unsigned long long)devid); 1222 (unsigned long long)devid);
1221 } 1223 }
1222 device = btrfs_find_device(root, devid, NULL, NULL); 1224 device = btrfs_find_device(root, devid, NULL, NULL);
1223 if (!device) { 1225 if (!device) {
1224 printk(KERN_INFO "resizer unable to find device %llu\n", 1226 printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
1225 (unsigned long long)devid); 1227 (unsigned long long)devid);
1226 ret = -EINVAL; 1228 ret = -EINVAL;
1227 goto out_unlock; 1229 goto out_unlock;
@@ -1267,7 +1269,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
1267 do_div(new_size, root->sectorsize); 1269 do_div(new_size, root->sectorsize);
1268 new_size *= root->sectorsize; 1270 new_size *= root->sectorsize;
1269 1271
1270 printk(KERN_INFO "new size for %s is %llu\n", 1272 printk(KERN_INFO "btrfs: new size for %s is %llu\n",
1271 device->name, (unsigned long long)new_size); 1273 device->name, (unsigned long long)new_size);
1272 1274
1273 if (new_size > old_size) { 1275 if (new_size > old_size) {
@@ -1278,7 +1280,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
1278 } 1280 }
1279 ret = btrfs_grow_device(trans, device, new_size); 1281 ret = btrfs_grow_device(trans, device, new_size);
1280 btrfs_commit_transaction(trans, root); 1282 btrfs_commit_transaction(trans, root);
1281 } else { 1283 } else if (new_size < old_size) {
1282 ret = btrfs_shrink_device(device, new_size); 1284 ret = btrfs_shrink_device(device, new_size);
1283 } 1285 }
1284 1286
@@ -2930,11 +2932,13 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
2930 goto out; 2932 goto out;
2931 2933
2932 for (i = 0; i < ipath->fspath->elem_cnt; ++i) { 2934 for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
2933 rel_ptr = ipath->fspath->val[i] - (u64)ipath->fspath->val; 2935 rel_ptr = ipath->fspath->val[i] -
2936 (u64)(unsigned long)ipath->fspath->val;
2934 ipath->fspath->val[i] = rel_ptr; 2937 ipath->fspath->val[i] = rel_ptr;
2935 } 2938 }
2936 2939
2937 ret = copy_to_user((void *)ipa->fspath, (void *)ipath->fspath, size); 2940 ret = copy_to_user((void *)(unsigned long)ipa->fspath,
2941 (void *)(unsigned long)ipath->fspath, size);
2938 if (ret) { 2942 if (ret) {
2939 ret = -EFAULT; 2943 ret = -EFAULT;
2940 goto out; 2944 goto out;
@@ -3017,7 +3021,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
3017 if (ret < 0) 3021 if (ret < 0)
3018 goto out; 3022 goto out;
3019 3023
3020 ret = copy_to_user((void *)loi->inodes, (void *)inodes, size); 3024 ret = copy_to_user((void *)(unsigned long)loi->inodes,
3025 (void *)(unsigned long)inodes, size);
3021 if (ret) 3026 if (ret)
3022 ret = -EFAULT; 3027 ret = -EFAULT;
3023 3028
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 24d654ce7a06..cfb55434a469 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1174,6 +1174,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
1174 list_add_tail(&new_edge->list[UPPER], 1174 list_add_tail(&new_edge->list[UPPER],
1175 &new_node->lower); 1175 &new_node->lower);
1176 } 1176 }
1177 } else {
1178 list_add_tail(&new_node->lower, &cache->leaves);
1177 } 1179 }
1178 1180
1179 rb_node = tree_insert(&cache->rb_root, new_node->bytenr, 1181 rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
@@ -2945,7 +2947,9 @@ static int relocate_file_extent_cluster(struct inode *inode,
2945 index = (cluster->start - offset) >> PAGE_CACHE_SHIFT; 2947 index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
2946 last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT; 2948 last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
2947 while (index <= last_index) { 2949 while (index <= last_index) {
2950 mutex_lock(&inode->i_mutex);
2948 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE); 2951 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
2952 mutex_unlock(&inode->i_mutex);
2949 if (ret) 2953 if (ret)
2950 goto out; 2954 goto out;
2951 2955
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index ed11d3866afd..ddf2c90d3fc0 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -256,6 +256,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
256 btrfs_release_path(swarn->path); 256 btrfs_release_path(swarn->path);
257 257
258 ipath = init_ipath(4096, local_root, swarn->path); 258 ipath = init_ipath(4096, local_root, swarn->path);
259 if (IS_ERR(ipath)) {
260 ret = PTR_ERR(ipath);
261 ipath = NULL;
262 goto err;
263 }
259 ret = paths_from_inode(inum, ipath); 264 ret = paths_from_inode(inum, ipath);
260 265
261 if (ret < 0) 266 if (ret < 0)
@@ -272,7 +277,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
272 swarn->logical, swarn->dev->name, 277 swarn->logical, swarn->dev->name,
273 (unsigned long long)swarn->sector, root, inum, offset, 278 (unsigned long long)swarn->sector, root, inum, offset,
274 min(isize - offset, (u64)PAGE_SIZE), nlink, 279 min(isize - offset, (u64)PAGE_SIZE), nlink,
275 (char *)ipath->fspath->val[i]); 280 (char *)(unsigned long)ipath->fspath->val[i]);
276 281
277 free_ipath(ipath); 282 free_ipath(ipath);
278 return 0; 283 return 0;
@@ -944,50 +949,18 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
944static int scrub_submit(struct scrub_dev *sdev) 949static int scrub_submit(struct scrub_dev *sdev)
945{ 950{
946 struct scrub_bio *sbio; 951 struct scrub_bio *sbio;
947 struct bio *bio;
948 int i;
949 952
950 if (sdev->curr == -1) 953 if (sdev->curr == -1)
951 return 0; 954 return 0;
952 955
953 sbio = sdev->bios[sdev->curr]; 956 sbio = sdev->bios[sdev->curr];
954
955 bio = bio_alloc(GFP_NOFS, sbio->count);
956 if (!bio)
957 goto nomem;
958
959 bio->bi_private = sbio;
960 bio->bi_end_io = scrub_bio_end_io;
961 bio->bi_bdev = sdev->dev->bdev;
962 bio->bi_sector = sbio->physical >> 9;
963
964 for (i = 0; i < sbio->count; ++i) {
965 struct page *page;
966 int ret;
967
968 page = alloc_page(GFP_NOFS);
969 if (!page)
970 goto nomem;
971
972 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
973 if (!ret) {
974 __free_page(page);
975 goto nomem;
976 }
977 }
978
979 sbio->err = 0; 957 sbio->err = 0;
980 sdev->curr = -1; 958 sdev->curr = -1;
981 atomic_inc(&sdev->in_flight); 959 atomic_inc(&sdev->in_flight);
982 960
983 submit_bio(READ, bio); 961 submit_bio(READ, sbio->bio);
984 962
985 return 0; 963 return 0;
986
987nomem:
988 scrub_free_bio(bio);
989
990 return -ENOMEM;
991} 964}
992 965
993static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, 966static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -995,6 +968,8 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
995 u8 *csum, int force) 968 u8 *csum, int force)
996{ 969{
997 struct scrub_bio *sbio; 970 struct scrub_bio *sbio;
971 struct page *page;
972 int ret;
998 973
999again: 974again:
1000 /* 975 /*
@@ -1015,12 +990,22 @@ again:
1015 } 990 }
1016 sbio = sdev->bios[sdev->curr]; 991 sbio = sdev->bios[sdev->curr];
1017 if (sbio->count == 0) { 992 if (sbio->count == 0) {
993 struct bio *bio;
994
1018 sbio->physical = physical; 995 sbio->physical = physical;
1019 sbio->logical = logical; 996 sbio->logical = logical;
997 bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
998 if (!bio)
999 return -ENOMEM;
1000
1001 bio->bi_private = sbio;
1002 bio->bi_end_io = scrub_bio_end_io;
1003 bio->bi_bdev = sdev->dev->bdev;
1004 bio->bi_sector = sbio->physical >> 9;
1005 sbio->err = 0;
1006 sbio->bio = bio;
1020 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || 1007 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
1021 sbio->logical + sbio->count * PAGE_SIZE != logical) { 1008 sbio->logical + sbio->count * PAGE_SIZE != logical) {
1022 int ret;
1023
1024 ret = scrub_submit(sdev); 1009 ret = scrub_submit(sdev);
1025 if (ret) 1010 if (ret)
1026 return ret; 1011 return ret;
@@ -1030,6 +1015,20 @@ again:
1030 sbio->spag[sbio->count].generation = gen; 1015 sbio->spag[sbio->count].generation = gen;
1031 sbio->spag[sbio->count].have_csum = 0; 1016 sbio->spag[sbio->count].have_csum = 0;
1032 sbio->spag[sbio->count].mirror_num = mirror_num; 1017 sbio->spag[sbio->count].mirror_num = mirror_num;
1018
1019 page = alloc_page(GFP_NOFS);
1020 if (!page)
1021 return -ENOMEM;
1022
1023 ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0);
1024 if (!ret) {
1025 __free_page(page);
1026 ret = scrub_submit(sdev);
1027 if (ret)
1028 return ret;
1029 goto again;
1030 }
1031
1033 if (csum) { 1032 if (csum) {
1034 sbio->spag[sbio->count].have_csum = 1; 1033 sbio->spag[sbio->count].have_csum = 1;
1035 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); 1034 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
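The scrub changes above move bio construction into scrub_page(): the bio is allocated when the first page of a run is queued, pages are appended with bio_add_page(), and a bio that will not take another page is submitted early before the append is retried. A generic fill-and-flush sketch of that pattern, with a plain array standing in for the bio:

#include <stdio.h>

#define BATCH_MAX 4

struct batch {
	int items[BATCH_MAX];
	int count;
};

static void flush(struct batch *b)
{
	printf("submit %d item(s)\n", b->count);
	b->count = 0;
}

static void queue_item(struct batch *b, int item)
{
	if (b->count == BATCH_MAX)	/* analogous to bio_add_page() failing */
		flush(b);		/* submit what we have, then retry the append */
	b->items[b->count++] = item;
}

int main(void)
{
	struct batch b = { {0}, 0 };
	int i;

	for (i = 0; i < 10; i++)
		queue_item(&b, i);
	if (b.count)
		flush(&b);		/* final partial batch */
	return 0;
}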
@@ -1536,18 +1535,22 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
1536static noinline_for_stack int scrub_workers_get(struct btrfs_root *root) 1535static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
1537{ 1536{
1538 struct btrfs_fs_info *fs_info = root->fs_info; 1537 struct btrfs_fs_info *fs_info = root->fs_info;
1538 int ret = 0;
1539 1539
1540 mutex_lock(&fs_info->scrub_lock); 1540 mutex_lock(&fs_info->scrub_lock);
1541 if (fs_info->scrub_workers_refcnt == 0) { 1541 if (fs_info->scrub_workers_refcnt == 0) {
1542 btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1542 btrfs_init_workers(&fs_info->scrub_workers, "scrub",
1543 fs_info->thread_pool_size, &fs_info->generic_worker); 1543 fs_info->thread_pool_size, &fs_info->generic_worker);
1544 fs_info->scrub_workers.idle_thresh = 4; 1544 fs_info->scrub_workers.idle_thresh = 4;
1545 btrfs_start_workers(&fs_info->scrub_workers, 1); 1545 ret = btrfs_start_workers(&fs_info->scrub_workers);
1546 if (ret)
1547 goto out;
1546 } 1548 }
1547 ++fs_info->scrub_workers_refcnt; 1549 ++fs_info->scrub_workers_refcnt;
1550out:
1548 mutex_unlock(&fs_info->scrub_lock); 1551 mutex_unlock(&fs_info->scrub_lock);
1549 1552
1550 return 0; 1553 return ret;
1551} 1554}
1552 1555
1553static noinline_for_stack void scrub_workers_put(struct btrfs_root *root) 1556static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 57080dffdfc6..200f63bc6675 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -41,6 +41,7 @@
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/cleancache.h> 42#include <linux/cleancache.h>
43#include <linux/mnt_namespace.h> 43#include <linux/mnt_namespace.h>
44#include <linux/ratelimit.h>
44#include "compat.h" 45#include "compat.h"
45#include "delayed-inode.h" 46#include "delayed-inode.h"
46#include "ctree.h" 47#include "ctree.h"
@@ -197,7 +198,7 @@ static match_table_t tokens = {
197 {Opt_subvolrootid, "subvolrootid=%d"}, 198 {Opt_subvolrootid, "subvolrootid=%d"},
198 {Opt_defrag, "autodefrag"}, 199 {Opt_defrag, "autodefrag"},
199 {Opt_inode_cache, "inode_cache"}, 200 {Opt_inode_cache, "inode_cache"},
200 {Opt_no_space_cache, "no_space_cache"}, 201 {Opt_no_space_cache, "nospace_cache"},
201 {Opt_recovery, "recovery"}, 202 {Opt_recovery, "recovery"},
202 {Opt_err, NULL}, 203 {Opt_err, NULL},
203}; 204};
@@ -448,6 +449,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
448 token = match_token(p, tokens, args); 449 token = match_token(p, tokens, args);
449 switch (token) { 450 switch (token) {
450 case Opt_subvol: 451 case Opt_subvol:
452 kfree(*subvol_name);
451 *subvol_name = match_strdup(&args[0]); 453 *subvol_name = match_strdup(&args[0]);
452 break; 454 break;
453 case Opt_subvolid: 455 case Opt_subvolid:
@@ -710,7 +712,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
710 if (btrfs_test_opt(root, SPACE_CACHE)) 712 if (btrfs_test_opt(root, SPACE_CACHE))
711 seq_puts(seq, ",space_cache"); 713 seq_puts(seq, ",space_cache");
712 else 714 else
713 seq_puts(seq, ",no_space_cache"); 715 seq_puts(seq, ",nospace_cache");
714 if (btrfs_test_opt(root, CLEAR_CACHE)) 716 if (btrfs_test_opt(root, CLEAR_CACHE))
715 seq_puts(seq, ",clear_cache"); 717 seq_puts(seq, ",clear_cache");
716 if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED)) 718 if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
@@ -824,13 +826,9 @@ static char *setup_root_args(char *args)
824static struct dentry *mount_subvol(const char *subvol_name, int flags, 826static struct dentry *mount_subvol(const char *subvol_name, int flags,
825 const char *device_name, char *data) 827 const char *device_name, char *data)
826{ 828{
827 struct super_block *s;
828 struct dentry *root; 829 struct dentry *root;
829 struct vfsmount *mnt; 830 struct vfsmount *mnt;
830 struct mnt_namespace *ns_private;
831 char *newargs; 831 char *newargs;
832 struct path path;
833 int error;
834 832
835 newargs = setup_root_args(data); 833 newargs = setup_root_args(data);
836 if (!newargs) 834 if (!newargs)
@@ -841,39 +839,17 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
841 if (IS_ERR(mnt)) 839 if (IS_ERR(mnt))
842 return ERR_CAST(mnt); 840 return ERR_CAST(mnt);
843 841
844 ns_private = create_mnt_ns(mnt); 842 root = mount_subtree(mnt, subvol_name);
845 if (IS_ERR(ns_private)) {
846 mntput(mnt);
847 return ERR_CAST(ns_private);
848 }
849
850 /*
851 * This will trigger the automount of the subvol so we can just
852 * drop the mnt we have here and return the dentry that we
853 * found.
854 */
855 error = vfs_path_lookup(mnt->mnt_root, mnt, subvol_name,
856 LOOKUP_FOLLOW, &path);
857 put_mnt_ns(ns_private);
858 if (error)
859 return ERR_PTR(error);
860 843
861 if (!is_subvolume_inode(path.dentry->d_inode)) { 844 if (!IS_ERR(root) && !is_subvolume_inode(root->d_inode)) {
862 path_put(&path); 845 struct super_block *s = root->d_sb;
863 mntput(mnt); 846 dput(root);
864 error = -EINVAL; 847 root = ERR_PTR(-EINVAL);
848 deactivate_locked_super(s);
865 printk(KERN_ERR "btrfs: '%s' is not a valid subvolume\n", 849 printk(KERN_ERR "btrfs: '%s' is not a valid subvolume\n",
866 subvol_name); 850 subvol_name);
867 return ERR_PTR(-EINVAL);
868 } 851 }
869 852
870 /* Get a ref to the sb and the dentry we found and return it */
871 s = path.mnt->mnt_sb;
872 atomic_inc(&s->s_active);
873 root = dget(path.dentry);
874 path_put(&path);
875 down_write(&s->s_umount);
876
877 return root; 853 return root;
878} 854}
879 855
@@ -890,7 +866,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
890 struct super_block *s; 866 struct super_block *s;
891 struct dentry *root; 867 struct dentry *root;
892 struct btrfs_fs_devices *fs_devices = NULL; 868 struct btrfs_fs_devices *fs_devices = NULL;
893 struct btrfs_root *tree_root = NULL;
894 struct btrfs_fs_info *fs_info = NULL; 869 struct btrfs_fs_info *fs_info = NULL;
895 fmode_t mode = FMODE_READ; 870 fmode_t mode = FMODE_READ;
896 char *subvol_name = NULL; 871 char *subvol_name = NULL;
@@ -904,8 +879,10 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
904 error = btrfs_parse_early_options(data, mode, fs_type, 879 error = btrfs_parse_early_options(data, mode, fs_type,
905 &subvol_name, &subvol_objectid, 880 &subvol_name, &subvol_objectid,
906 &subvol_rootid, &fs_devices); 881 &subvol_rootid, &fs_devices);
907 if (error) 882 if (error) {
883 kfree(subvol_name);
908 return ERR_PTR(error); 884 return ERR_PTR(error);
885 }
909 886
910 if (subvol_name) { 887 if (subvol_name) {
911 root = mount_subvol(subvol_name, flags, device_name, data); 888 root = mount_subvol(subvol_name, flags, device_name, data);
@@ -917,15 +894,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
917 if (error) 894 if (error)
918 return ERR_PTR(error); 895 return ERR_PTR(error);
919 896
920 error = btrfs_open_devices(fs_devices, mode, fs_type);
921 if (error)
922 return ERR_PTR(error);
923
924 if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
925 error = -EACCES;
926 goto error_close_devices;
927 }
928
929 /* 897 /*
930 * Setup a dummy root and fs_info for test/set super. This is because 898 * Setup a dummy root and fs_info for test/set super. This is because
931 * we don't actually fill this stuff out until open_ctree, but we need 899 * we don't actually fill this stuff out until open_ctree, but we need
@@ -933,24 +901,36 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
933 * then open_ctree will properly initialize everything later. 901 * then open_ctree will properly initialize everything later.
934 */ 902 */
935 fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS); 903 fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS);
936 tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); 904 if (!fs_info)
937 if (!fs_info || !tree_root) { 905 return ERR_PTR(-ENOMEM);
906
907 fs_info->tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
908 if (!fs_info->tree_root) {
938 error = -ENOMEM; 909 error = -ENOMEM;
939 goto error_close_devices; 910 goto error_fs_info;
940 } 911 }
941 fs_info->tree_root = tree_root; 912 fs_info->tree_root->fs_info = fs_info;
942 fs_info->fs_devices = fs_devices; 913 fs_info->fs_devices = fs_devices;
943 tree_root->fs_info = fs_info;
944 914
945 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 915 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
946 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 916 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
947 if (!fs_info->super_copy || !fs_info->super_for_commit) { 917 if (!fs_info->super_copy || !fs_info->super_for_commit) {
948 error = -ENOMEM; 918 error = -ENOMEM;
919 goto error_fs_info;
920 }
921
922 error = btrfs_open_devices(fs_devices, mode, fs_type);
923 if (error)
924 goto error_fs_info;
925
926 if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
927 error = -EACCES;
949 goto error_close_devices; 928 goto error_close_devices;
950 } 929 }
951 930
952 bdev = fs_devices->latest_bdev; 931 bdev = fs_devices->latest_bdev;
953 s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root); 932 s = sget(fs_type, btrfs_test_super, btrfs_set_super,
933 fs_info->tree_root);
954 if (IS_ERR(s)) { 934 if (IS_ERR(s)) {
955 error = PTR_ERR(s); 935 error = PTR_ERR(s);
956 goto error_close_devices; 936 goto error_close_devices;
@@ -959,12 +939,12 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
959 if (s->s_root) { 939 if (s->s_root) {
960 if ((flags ^ s->s_flags) & MS_RDONLY) { 940 if ((flags ^ s->s_flags) & MS_RDONLY) {
961 deactivate_locked_super(s); 941 deactivate_locked_super(s);
962 return ERR_PTR(-EBUSY); 942 error = -EBUSY;
943 goto error_close_devices;
963 } 944 }
964 945
965 btrfs_close_devices(fs_devices); 946 btrfs_close_devices(fs_devices);
966 free_fs_info(fs_info); 947 free_fs_info(fs_info);
967 kfree(tree_root);
968 } else { 948 } else {
969 char b[BDEVNAME_SIZE]; 949 char b[BDEVNAME_SIZE];
970 950
@@ -991,8 +971,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
991 971
992error_close_devices: 972error_close_devices:
993 btrfs_close_devices(fs_devices); 973 btrfs_close_devices(fs_devices);
974error_fs_info:
994 free_fs_info(fs_info); 975 free_fs_info(fs_info);
995 kfree(tree_root);
996 return ERR_PTR(error); 976 return ERR_PTR(error);
997} 977}
998 978
@@ -1074,11 +1054,11 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1074 u64 avail_space; 1054 u64 avail_space;
1075 u64 used_space; 1055 u64 used_space;
1076 u64 min_stripe_size; 1056 u64 min_stripe_size;
1077 int min_stripes = 1; 1057 int min_stripes = 1, num_stripes = 1;
1078 int i = 0, nr_devices; 1058 int i = 0, nr_devices;
1079 int ret; 1059 int ret;
1080 1060
1081 nr_devices = fs_info->fs_devices->rw_devices; 1061 nr_devices = fs_info->fs_devices->open_devices;
1082 BUG_ON(!nr_devices); 1062 BUG_ON(!nr_devices);
1083 1063
1084 devices_info = kmalloc(sizeof(*devices_info) * nr_devices, 1064 devices_info = kmalloc(sizeof(*devices_info) * nr_devices,
@@ -1088,20 +1068,24 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1088 1068
1089 /* calc min stripe number for data space allocation */ 1069 /* calc min stripe number for data space allocation */
1090 type = btrfs_get_alloc_profile(root, 1); 1070 type = btrfs_get_alloc_profile(root, 1);
1091 if (type & BTRFS_BLOCK_GROUP_RAID0) 1071 if (type & BTRFS_BLOCK_GROUP_RAID0) {
1092 min_stripes = 2; 1072 min_stripes = 2;
1093 else if (type & BTRFS_BLOCK_GROUP_RAID1) 1073 num_stripes = nr_devices;
1074 } else if (type & BTRFS_BLOCK_GROUP_RAID1) {
1094 min_stripes = 2; 1075 min_stripes = 2;
1095 else if (type & BTRFS_BLOCK_GROUP_RAID10) 1076 num_stripes = 2;
1077 } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
1096 min_stripes = 4; 1078 min_stripes = 4;
1079 num_stripes = 4;
1080 }
1097 1081
1098 if (type & BTRFS_BLOCK_GROUP_DUP) 1082 if (type & BTRFS_BLOCK_GROUP_DUP)
1099 min_stripe_size = 2 * BTRFS_STRIPE_LEN; 1083 min_stripe_size = 2 * BTRFS_STRIPE_LEN;
1100 else 1084 else
1101 min_stripe_size = BTRFS_STRIPE_LEN; 1085 min_stripe_size = BTRFS_STRIPE_LEN;
1102 1086
1103 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 1087 list_for_each_entry(device, &fs_devices->devices, dev_list) {
1104 if (!device->in_fs_metadata) 1088 if (!device->in_fs_metadata || !device->bdev)
1105 continue; 1089 continue;
1106 1090
1107 avail_space = device->total_bytes - device->bytes_used; 1091 avail_space = device->total_bytes - device->bytes_used;
@@ -1162,13 +1146,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1162 i = nr_devices - 1; 1146 i = nr_devices - 1;
1163 avail_space = 0; 1147 avail_space = 0;
1164 while (nr_devices >= min_stripes) { 1148 while (nr_devices >= min_stripes) {
1149 if (num_stripes > nr_devices)
1150 num_stripes = nr_devices;
1151
1165 if (devices_info[i].max_avail >= min_stripe_size) { 1152 if (devices_info[i].max_avail >= min_stripe_size) {
1166 int j; 1153 int j;
1167 u64 alloc_size; 1154 u64 alloc_size;
1168 1155
1169 avail_space += devices_info[i].max_avail * min_stripes; 1156 avail_space += devices_info[i].max_avail * num_stripes;
1170 alloc_size = devices_info[i].max_avail; 1157 alloc_size = devices_info[i].max_avail;
1171 for (j = i + 1 - min_stripes; j <= i; j++) 1158 for (j = i + 1 - num_stripes; j <= i; j++)
1172 devices_info[j].max_avail -= alloc_size; 1159 devices_info[j].max_avail -= alloc_size;
1173 } 1160 }
1174 i--; 1161 i--;
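
The two hunks above rework the free-space estimate in btrfs_calc_avail_data_space(): besides min_stripes, the patch now tracks num_stripes per allocation profile (all open devices for RAID0, 2 for RAID1, 4 for RAID10, clamped to the devices still in play inside the loop) and accumulates max_avail * num_stripes instead of max_avail * min_stripes. A minimal userspace sketch of that selection follows, assuming made-up profile flag values (the real BTRFS_BLOCK_GROUP_* bits live in fs/btrfs/ctree.h):

    #include <stdio.h>
    #include <stdint.h>

    /* Made-up stand-ins for the block group profile bits (assumption: the
     * real BTRFS_BLOCK_GROUP_* values differ from these). */
    #define BG_RAID0  (1ULL << 0)
    #define BG_RAID1  (1ULL << 1)
    #define BG_RAID10 (1ULL << 2)

    /* Mirrors the selection added above: num_stripes is how many stripes a
     * data chunk would actually span for the profile. */
    static void pick_stripes(uint64_t type, int nr_devices,
                             int *min_stripes, int *num_stripes)
    {
        *min_stripes = 1;
        *num_stripes = 1;
        if (type & BG_RAID0) {
            *min_stripes = 2;
            *num_stripes = nr_devices;  /* stripe across every open device */
        } else if (type & BG_RAID1) {
            *min_stripes = 2;
            *num_stripes = 2;           /* two mirrored stripes */
        } else if (type & BG_RAID10) {
            *min_stripes = 4;
            *num_stripes = 4;
        }
        /* in the patch this clamp sits inside the allocation loop */
        if (*num_stripes > nr_devices)
            *num_stripes = nr_devices;
    }

    int main(void)
    {
        int min_stripes, num_stripes;

        pick_stripes(BG_RAID0, 3, &min_stripes, &num_stripes);
        printf("RAID0, 3 devices: min=%d num=%d\n", min_stripes, num_stripes);
        pick_stripes(BG_RAID1, 5, &min_stripes, &num_stripes);
        printf("RAID1, 5 devices: min=%d num=%d\n", min_stripes, num_stripes);
        return 0;
    }
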
@@ -1285,6 +1272,16 @@ static int btrfs_unfreeze(struct super_block *sb)
1285 return 0; 1272 return 0;
1286} 1273}
1287 1274
1275static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
1276{
1277 int ret;
1278
1279 ret = btrfs_dirty_inode(inode);
1280 if (ret)
1281 printk_ratelimited(KERN_ERR "btrfs: fail to dirty inode %Lu "
1282 "error %d\n", btrfs_ino(inode), ret);
1283}
1284
1288static const struct super_operations btrfs_super_ops = { 1285static const struct super_operations btrfs_super_ops = {
1289 .drop_inode = btrfs_drop_inode, 1286 .drop_inode = btrfs_drop_inode,
1290 .evict_inode = btrfs_evict_inode, 1287 .evict_inode = btrfs_evict_inode,
@@ -1292,7 +1289,7 @@ static const struct super_operations btrfs_super_ops = {
1292 .sync_fs = btrfs_sync_fs, 1289 .sync_fs = btrfs_sync_fs,
1293 .show_options = btrfs_show_options, 1290 .show_options = btrfs_show_options,
1294 .write_inode = btrfs_write_inode, 1291 .write_inode = btrfs_write_inode,
1295 .dirty_inode = btrfs_dirty_inode, 1292 .dirty_inode = btrfs_fs_dirty_inode,
1296 .alloc_inode = btrfs_alloc_inode, 1293 .alloc_inode = btrfs_alloc_inode,
1297 .destroy_inode = btrfs_destroy_inode, 1294 .destroy_inode = btrfs_destroy_inode,
1298 .statfs = btrfs_statfs, 1295 .statfs = btrfs_statfs,
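
Together, the two hunks above stop btrfs from silently dropping a failure of btrfs_dirty_inode(): since the VFS .dirty_inode callback returns void, the patch interposes btrfs_fs_dirty_inode(), which calls the fallible helper and reports errors with printk_ratelimited(). A rough userspace analogue of that shape, with a crude one-message-per-second limiter standing in for printk_ratelimited() (illustrative names only, not kernel API):

    #include <stdio.h>
    #include <time.h>
    #include <errno.h>

    /* Fallible worker, standing in for btrfs_dirty_inode(). */
    static int dirty_inode(unsigned long long ino)
    {
        return (ino & 1) ? -ENOSPC : 0;   /* pretend odd inode numbers fail */
    }

    /* Crude stand-in for printk_ratelimited(): at most one line per second. */
    static void log_dirty_failure(unsigned long long ino, int err)
    {
        static time_t last;
        time_t now = time(NULL);

        if (now == last)
            return;
        last = now;
        fprintf(stderr, "fail to dirty inode %llu error %d\n", ino, err);
    }

    /* void wrapper, the shape of the new btrfs_fs_dirty_inode(). */
    static void fs_dirty_inode(unsigned long long ino)
    {
        int ret = dirty_inode(ino);

        if (ret)
            log_dirty_failure(ino, ret);
    }

    int main(void)
    {
        unsigned long long ino;

        for (ino = 256; ino < 260; ino++)
            fs_dirty_inode(ino);
        return 0;
    }
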
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 960835eaf4da..81376d94cd3c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -785,6 +785,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
785 785
786 btrfs_save_ino_cache(root, trans); 786 btrfs_save_ino_cache(root, trans);
787 787
788 /* see comments in should_cow_block() */
789 root->force_cow = 0;
790 smp_wmb();
791
788 if (root->commit_root != root->node) { 792 if (root->commit_root != root->node) {
789 mutex_lock(&root->fs_commit_mutex); 793 mutex_lock(&root->fs_commit_mutex);
790 switch_commit_root(root); 794 switch_commit_root(root);
@@ -882,8 +886,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
882 btrfs_reloc_pre_snapshot(trans, pending, &to_reserve); 886 btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
883 887
884 if (to_reserve > 0) { 888 if (to_reserve > 0) {
885 ret = btrfs_block_rsv_add(root, &pending->block_rsv, 889 ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
886 to_reserve); 890 to_reserve);
887 if (ret) { 891 if (ret) {
888 pending->error = ret; 892 pending->error = ret;
889 goto fail; 893 goto fail;
@@ -947,6 +951,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
947 btrfs_tree_unlock(old); 951 btrfs_tree_unlock(old);
948 free_extent_buffer(old); 952 free_extent_buffer(old);
949 953
954 /* see comments in should_cow_block() */
955 root->force_cow = 1;
956 smp_wmb();
957
950 btrfs_set_root_node(new_root_item, tmp); 958 btrfs_set_root_node(new_root_item, tmp);
951 /* record when the snapshot was created in key.offset */ 959 /* record when the snapshot was created in key.offset */
952 key.offset = trans->transid; 960 key.offset = trans->transid;
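
Both transaction.c hunks above pair an update of root->force_cow with smp_wmb(): snapshot creation raises the flag once the snapshot root has been set up, and commit_fs_roots() clears it after the roots are committed, so readers in should_cow_block() see the flag only together with the state it guards. A userspace sketch of the same publish/consume pattern, using C11 release/acquire atomics as an analogue for the kernel barriers rather than the kernel primitives themselves:

    #include <stdatomic.h>
    #include <stdio.h>

    struct root {
        unsigned long long snap_transid;  /* state the flag guards */
        atomic_int force_cow;             /* published last */
    };

    /* Writer: record the snapshot, then publish force_cow with release
     * ordering (the role smp_wmb() plays after the store in the patch). */
    static void take_snapshot(struct root *r, unsigned long long transid)
    {
        r->snap_transid = transid;
        atomic_store_explicit(&r->force_cow, 1, memory_order_release);
    }

    /* Reader: acquire-load the flag before trusting the guarded state,
     * roughly what should_cow_block() depends on. */
    static int must_cow(struct root *r)
    {
        return atomic_load_explicit(&r->force_cow, memory_order_acquire) != 0;
    }

    int main(void)
    {
        struct root r = { 0 };

        take_snapshot(&r, 42);
        printf("force cow: %d (snap transid %llu)\n",
               must_cow(&r), r.snap_transid);
        return 0;
    }
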
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f8e2943101a1..f4b839fd3c9d 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -295,6 +295,12 @@ loop_lock:
295 btrfs_requeue_work(&device->work); 295 btrfs_requeue_work(&device->work);
296 goto done; 296 goto done;
297 } 297 }
298 /* unplug every 64 requests just for good measure */
299 if (batch_run % 64 == 0) {
300 blk_finish_plug(&plug);
301 blk_start_plug(&plug);
302 sync_pending = 0;
303 }
298 } 304 }
299 305
300 cond_resched(); 306 cond_resched();
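
The hunk above unplugs and re-plugs the block queue every 64 submitted requests, so a long submission loop releases its batched I/O to the device periodically instead of holding it until the loop ends. The same flush-every-N-items shape in plain C, with a counter standing in for the block-layer plug (nothing here is block-layer API):

    #include <stdio.h>

    #define BATCH_LIMIT 64

    struct plug {
        int pending;            /* requests queued but not yet issued */
    };

    static void plug_flush(struct plug *p)
    {
        if (p->pending)
            printf("issuing %d queued requests\n", p->pending);
        p->pending = 0;
    }

    static void submit_request(struct plug *p, int batch_run)
    {
        p->pending++;
        /* unplug every BATCH_LIMIT requests just for good measure */
        if (batch_run % BATCH_LIMIT == 0)
            plug_flush(p);
    }

    int main(void)
    {
        struct plug plug = { 0 };
        int batch_run;

        for (batch_run = 1; batch_run <= 200; batch_run++)
            submit_request(&plug, batch_run);
        plug_flush(&plug);      /* final flush, like blk_finish_plug() */
        return 0;
    }
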
@@ -999,7 +1005,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
999 key.objectid = device->devid; 1005 key.objectid = device->devid;
1000 key.offset = start; 1006 key.offset = start;
1001 key.type = BTRFS_DEV_EXTENT_KEY; 1007 key.type = BTRFS_DEV_EXTENT_KEY;
1002 1008again:
1003 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1009 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1004 if (ret > 0) { 1010 if (ret > 0) {
1005 ret = btrfs_previous_item(root, path, key.objectid, 1011 ret = btrfs_previous_item(root, path, key.objectid,
@@ -1012,6 +1018,9 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1012 struct btrfs_dev_extent); 1018 struct btrfs_dev_extent);
1013 BUG_ON(found_key.offset > start || found_key.offset + 1019 BUG_ON(found_key.offset > start || found_key.offset +
1014 btrfs_dev_extent_length(leaf, extent) < start); 1020 btrfs_dev_extent_length(leaf, extent) < start);
1021 key = found_key;
1022 btrfs_release_path(path);
1023 goto again;
1015 } else if (ret == 0) { 1024 } else if (ret == 0) {
1016 leaf = path->nodes[0]; 1025 leaf = path->nodes[0];
1017 extent = btrfs_item_ptr(leaf, path->slots[0], 1026 extent = btrfs_item_ptr(leaf, path->slots[0],
@@ -1608,7 +1617,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1608 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) 1617 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1609 return -EINVAL; 1618 return -EINVAL;
1610 1619
1611 bdev = blkdev_get_by_path(device_path, FMODE_EXCL, 1620 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1612 root->fs_info->bdev_holder); 1621 root->fs_info->bdev_holder);
1613 if (IS_ERR(bdev)) 1622 if (IS_ERR(bdev))
1614 return PTR_ERR(bdev); 1623 return PTR_ERR(bdev);
@@ -3255,7 +3264,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
3255 */ 3264 */
3256 if (atomic_read(&bbio->error) > bbio->max_errors) { 3265 if (atomic_read(&bbio->error) > bbio->max_errors) {
3257 err = -EIO; 3266 err = -EIO;
3258 } else if (err) { 3267 } else {
3259 /* 3268 /*
3260 * this bio is actually up to date, we didn't 3269 * this bio is actually up to date, we didn't
3261 * go over the max number of errors 3270 * go over the max number of errors
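
The btrfs_end_bio() hunk above drops the "if (err)" qualifier: once all mirrored copies of the I/O have completed, the request fails only when the accumulated error count exceeds bbio->max_errors, and otherwise is reported up to date even if this particular copy returned an error. The thresholding decision, reduced to a plain function rather than the bio end_io machinery:

    #include <stdio.h>
    #include <errno.h>

    /*
     * Final status of a mirrored request: fail only when more copies failed
     * than the profile tolerates; otherwise enough mirrors hold good data
     * and the request counts as up to date.
     */
    static int resolve_status(int errors_seen, int max_errors)
    {
        if (errors_seen > max_errors)
            return -EIO;
        return 0;   /* up to date, even if this particular copy errored */
    }

    int main(void)
    {
        printf("1 error, tolerates 1: %d\n", resolve_status(1, 1));
        printf("2 errors, tolerates 1: %d\n", resolve_status(2, 1));
        return 0;
    }
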
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index ab5b1c49f352..78f2d4d4f37f 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -100,6 +100,12 @@ struct btrfs_device {
100 struct reada_zone *reada_curr_zone; 100 struct reada_zone *reada_curr_zone;
101 struct radix_tree_root reada_zones; 101 struct radix_tree_root reada_zones;
102 struct radix_tree_root reada_extents; 102 struct radix_tree_root reada_extents;
103
104 /* for sending down flush barriers */
105 struct bio *flush_bio;
106 struct completion flush_wait;
107 int nobarriers;
108
103}; 109};
104 110
105struct btrfs_fs_devices { 111struct btrfs_fs_devices {
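
The volumes.h hunk adds per-device state for flush barriers: a dedicated flush bio, a completion to wait on, and a nobarriers opt-out. The kernel's struct completion can be approximated in userspace with a mutex/condvar pair; the sketch below (a pthread analogue, not kernel API, build with -lpthread) shows the wait/complete handshake those fields support:

    #include <pthread.h>
    #include <stdio.h>

    /* Userspace analogue of the kernel's struct completion. */
    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    static void init_completion(struct completion *c)
    {
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = 0;
    }

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    /* Cut-down device: just the barrier-related fields from the hunk. */
    struct device {
        struct completion flush_wait;
        int nobarriers;
    };

    static void *flush_done(void *arg)
    {
        complete(arg);          /* plays the part of the flush bio's end_io */
        return NULL;
    }

    int main(void)
    {
        struct device dev;
        pthread_t t;

        dev.nobarriers = 0;
        init_completion(&dev.flush_wait);
        if (!dev.nobarriers) {
            pthread_create(&t, NULL, flush_done, &dev.flush_wait);
            wait_for_completion(&dev.flush_wait);
            pthread_join(t, NULL);
        }
        printf("flush barrier completed\n");
        return 0;
    }
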
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 4144caf2f9d3..173b1d22e59b 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
87 snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context); 87 snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
88 88
89 /* dirty the head */ 89 /* dirty the head */
90 spin_lock(&inode->i_lock); 90 spin_lock(&ci->i_ceph_lock);
91 if (ci->i_head_snapc == NULL) 91 if (ci->i_head_snapc == NULL)
92 ci->i_head_snapc = ceph_get_snap_context(snapc); 92 ci->i_head_snapc = ceph_get_snap_context(snapc);
93 ++ci->i_wrbuffer_ref_head; 93 ++ci->i_wrbuffer_ref_head;
@@ -100,7 +100,7 @@ static int ceph_set_page_dirty(struct page *page)
100 ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, 100 ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
101 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 101 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
102 snapc, snapc->seq, snapc->num_snaps); 102 snapc, snapc->seq, snapc->num_snaps);
103 spin_unlock(&inode->i_lock); 103 spin_unlock(&ci->i_ceph_lock);
104 104
105 /* now adjust page */ 105 /* now adjust page */
106 spin_lock_irq(&mapping->tree_lock); 106 spin_lock_irq(&mapping->tree_lock);
@@ -391,7 +391,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
391 struct ceph_snap_context *snapc = NULL; 391 struct ceph_snap_context *snapc = NULL;
392 struct ceph_cap_snap *capsnap = NULL; 392 struct ceph_cap_snap *capsnap = NULL;
393 393
394 spin_lock(&inode->i_lock); 394 spin_lock(&ci->i_ceph_lock);
395 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 395 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
396 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, 396 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
397 capsnap->context, capsnap->dirty_pages); 397 capsnap->context, capsnap->dirty_pages);
@@ -407,7 +407,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
407 dout(" head snapc %p has %d dirty pages\n", 407 dout(" head snapc %p has %d dirty pages\n",
408 snapc, ci->i_wrbuffer_ref_head); 408 snapc, ci->i_wrbuffer_ref_head);
409 } 409 }
410 spin_unlock(&inode->i_lock); 410 spin_unlock(&ci->i_ceph_lock);
411 return snapc; 411 return snapc;
412} 412}
413 413
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 0f327c6c9679..8b53193e4f7c 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -309,7 +309,7 @@ void ceph_reservation_status(struct ceph_fs_client *fsc,
309/* 309/*
310 * Find ceph_cap for given mds, if any. 310 * Find ceph_cap for given mds, if any.
311 * 311 *
312 * Called with i_lock held. 312 * Called with i_ceph_lock held.
313 */ 313 */
314static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds) 314static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
315{ 315{
@@ -332,9 +332,9 @@ struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
332{ 332{
333 struct ceph_cap *cap; 333 struct ceph_cap *cap;
334 334
335 spin_lock(&ci->vfs_inode.i_lock); 335 spin_lock(&ci->i_ceph_lock);
336 cap = __get_cap_for_mds(ci, mds); 336 cap = __get_cap_for_mds(ci, mds);
337 spin_unlock(&ci->vfs_inode.i_lock); 337 spin_unlock(&ci->i_ceph_lock);
338 return cap; 338 return cap;
339} 339}
340 340
@@ -361,15 +361,16 @@ static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
361 361
362int ceph_get_cap_mds(struct inode *inode) 362int ceph_get_cap_mds(struct inode *inode)
363{ 363{
364 struct ceph_inode_info *ci = ceph_inode(inode);
364 int mds; 365 int mds;
365 spin_lock(&inode->i_lock); 366 spin_lock(&ci->i_ceph_lock);
366 mds = __ceph_get_cap_mds(ceph_inode(inode)); 367 mds = __ceph_get_cap_mds(ceph_inode(inode));
367 spin_unlock(&inode->i_lock); 368 spin_unlock(&ci->i_ceph_lock);
368 return mds; 369 return mds;
369} 370}
370 371
371/* 372/*
372 * Called under i_lock. 373 * Called under i_ceph_lock.
373 */ 374 */
374static void __insert_cap_node(struct ceph_inode_info *ci, 375static void __insert_cap_node(struct ceph_inode_info *ci,
375 struct ceph_cap *new) 376 struct ceph_cap *new)
@@ -415,7 +416,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
415 * 416 *
416 * If I_FLUSH is set, leave the inode at the front of the list. 417 * If I_FLUSH is set, leave the inode at the front of the list.
417 * 418 *
418 * Caller holds i_lock 419 * Caller holds i_ceph_lock
419 * -> we take mdsc->cap_delay_lock 420 * -> we take mdsc->cap_delay_lock
420 */ 421 */
421static void __cap_delay_requeue(struct ceph_mds_client *mdsc, 422static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
@@ -457,7 +458,7 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
457/* 458/*
458 * Cancel delayed work on cap. 459 * Cancel delayed work on cap.
459 * 460 *
460 * Caller must hold i_lock. 461 * Caller must hold i_ceph_lock.
461 */ 462 */
462static void __cap_delay_cancel(struct ceph_mds_client *mdsc, 463static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
463 struct ceph_inode_info *ci) 464 struct ceph_inode_info *ci)
@@ -532,14 +533,14 @@ int ceph_add_cap(struct inode *inode,
532 wanted |= ceph_caps_for_mode(fmode); 533 wanted |= ceph_caps_for_mode(fmode);
533 534
534retry: 535retry:
535 spin_lock(&inode->i_lock); 536 spin_lock(&ci->i_ceph_lock);
536 cap = __get_cap_for_mds(ci, mds); 537 cap = __get_cap_for_mds(ci, mds);
537 if (!cap) { 538 if (!cap) {
538 if (new_cap) { 539 if (new_cap) {
539 cap = new_cap; 540 cap = new_cap;
540 new_cap = NULL; 541 new_cap = NULL;
541 } else { 542 } else {
542 spin_unlock(&inode->i_lock); 543 spin_unlock(&ci->i_ceph_lock);
543 new_cap = get_cap(mdsc, caps_reservation); 544 new_cap = get_cap(mdsc, caps_reservation);
544 if (new_cap == NULL) 545 if (new_cap == NULL)
545 return -ENOMEM; 546 return -ENOMEM;
@@ -625,7 +626,7 @@ retry:
625 626
626 if (fmode >= 0) 627 if (fmode >= 0)
627 __ceph_get_fmode(ci, fmode); 628 __ceph_get_fmode(ci, fmode);
628 spin_unlock(&inode->i_lock); 629 spin_unlock(&ci->i_ceph_lock);
629 wake_up_all(&ci->i_cap_wq); 630 wake_up_all(&ci->i_cap_wq);
630 return 0; 631 return 0;
631} 632}
@@ -792,7 +793,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
792 struct rb_node *p; 793 struct rb_node *p;
793 int ret = 0; 794 int ret = 0;
794 795
795 spin_lock(&inode->i_lock); 796 spin_lock(&ci->i_ceph_lock);
796 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { 797 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
797 cap = rb_entry(p, struct ceph_cap, ci_node); 798 cap = rb_entry(p, struct ceph_cap, ci_node);
798 if (__cap_is_valid(cap) && 799 if (__cap_is_valid(cap) &&
@@ -801,7 +802,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
801 break; 802 break;
802 } 803 }
803 } 804 }
804 spin_unlock(&inode->i_lock); 805 spin_unlock(&ci->i_ceph_lock);
805 dout("ceph_caps_revoking %p %s = %d\n", inode, 806 dout("ceph_caps_revoking %p %s = %d\n", inode,
806 ceph_cap_string(mask), ret); 807 ceph_cap_string(mask), ret);
807 return ret; 808 return ret;
@@ -855,7 +856,7 @@ int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
855} 856}
856 857
857/* 858/*
858 * called under i_lock 859 * called under i_ceph_lock
859 */ 860 */
860static int __ceph_is_any_caps(struct ceph_inode_info *ci) 861static int __ceph_is_any_caps(struct ceph_inode_info *ci)
861{ 862{
@@ -865,7 +866,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
865/* 866/*
866 * Remove a cap. Take steps to deal with a racing iterate_session_caps. 867 * Remove a cap. Take steps to deal with a racing iterate_session_caps.
867 * 868 *
868 * caller should hold i_lock. 869 * caller should hold i_ceph_lock.
869 * caller will not hold session s_mutex if called from destroy_inode. 870 * caller will not hold session s_mutex if called from destroy_inode.
870 */ 871 */
871void __ceph_remove_cap(struct ceph_cap *cap) 872void __ceph_remove_cap(struct ceph_cap *cap)
@@ -1028,7 +1029,7 @@ static void __queue_cap_release(struct ceph_mds_session *session,
1028 1029
1029/* 1030/*
1030 * Queue cap releases when an inode is dropped from our cache. Since 1031 * Queue cap releases when an inode is dropped from our cache. Since
1031 * inode is about to be destroyed, there is no need for i_lock. 1032 * inode is about to be destroyed, there is no need for i_ceph_lock.
1032 */ 1033 */
1033void ceph_queue_caps_release(struct inode *inode) 1034void ceph_queue_caps_release(struct inode *inode)
1034{ 1035{
@@ -1049,7 +1050,7 @@ void ceph_queue_caps_release(struct inode *inode)
1049 1050
1050/* 1051/*
1051 * Send a cap msg on the given inode. Update our caps state, then 1052 * Send a cap msg on the given inode. Update our caps state, then
1052 * drop i_lock and send the message. 1053 * drop i_ceph_lock and send the message.
1053 * 1054 *
1054 * Make note of max_size reported/requested from mds, revoked caps 1055 * Make note of max_size reported/requested from mds, revoked caps
1055 * that have now been implemented. 1056 * that have now been implemented.
@@ -1061,13 +1062,13 @@ void ceph_queue_caps_release(struct inode *inode)
1061 * Return non-zero if delayed release, or we experienced an error 1062 * Return non-zero if delayed release, or we experienced an error
1062 * such that the caller should requeue + retry later. 1063 * such that the caller should requeue + retry later.
1063 * 1064 *
1064 * called with i_lock, then drops it. 1065 * called with i_ceph_lock, then drops it.
1065 * caller should hold snap_rwsem (read), s_mutex. 1066 * caller should hold snap_rwsem (read), s_mutex.
1066 */ 1067 */
1067static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, 1068static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1068 int op, int used, int want, int retain, int flushing, 1069 int op, int used, int want, int retain, int flushing,
1069 unsigned *pflush_tid) 1070 unsigned *pflush_tid)
1070 __releases(cap->ci->vfs_inode->i_lock) 1071 __releases(cap->ci->i_ceph_lock)
1071{ 1072{
1072 struct ceph_inode_info *ci = cap->ci; 1073 struct ceph_inode_info *ci = cap->ci;
1073 struct inode *inode = &ci->vfs_inode; 1074 struct inode *inode = &ci->vfs_inode;
@@ -1170,7 +1171,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1170 xattr_version = ci->i_xattrs.version; 1171 xattr_version = ci->i_xattrs.version;
1171 } 1172 }
1172 1173
1173 spin_unlock(&inode->i_lock); 1174 spin_unlock(&ci->i_ceph_lock);
1174 1175
1175 ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id, 1176 ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
1176 op, keep, want, flushing, seq, flush_tid, issue_seq, mseq, 1177 op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
@@ -1198,13 +1199,13 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1198 * Unless @again is true, skip cap_snaps that were already sent to 1199 * Unless @again is true, skip cap_snaps that were already sent to
1199 * the MDS (i.e., during this session). 1200 * the MDS (i.e., during this session).
1200 * 1201 *
1201 * Called under i_lock. Takes s_mutex as needed. 1202 * Called under i_ceph_lock. Takes s_mutex as needed.
1202 */ 1203 */
1203void __ceph_flush_snaps(struct ceph_inode_info *ci, 1204void __ceph_flush_snaps(struct ceph_inode_info *ci,
1204 struct ceph_mds_session **psession, 1205 struct ceph_mds_session **psession,
1205 int again) 1206 int again)
1206 __releases(ci->vfs_inode->i_lock) 1207 __releases(ci->i_ceph_lock)
1207 __acquires(ci->vfs_inode->i_lock) 1208 __acquires(ci->i_ceph_lock)
1208{ 1209{
1209 struct inode *inode = &ci->vfs_inode; 1210 struct inode *inode = &ci->vfs_inode;
1210 int mds; 1211 int mds;
@@ -1261,7 +1262,7 @@ retry:
1261 session = NULL; 1262 session = NULL;
1262 } 1263 }
1263 if (!session) { 1264 if (!session) {
1264 spin_unlock(&inode->i_lock); 1265 spin_unlock(&ci->i_ceph_lock);
1265 mutex_lock(&mdsc->mutex); 1266 mutex_lock(&mdsc->mutex);
1266 session = __ceph_lookup_mds_session(mdsc, mds); 1267 session = __ceph_lookup_mds_session(mdsc, mds);
1267 mutex_unlock(&mdsc->mutex); 1268 mutex_unlock(&mdsc->mutex);
@@ -1275,7 +1276,7 @@ retry:
1275 * deletion or migration. retry, and we'll 1276 * deletion or migration. retry, and we'll
1276 * get a better @mds value next time. 1277 * get a better @mds value next time.
1277 */ 1278 */
1278 spin_lock(&inode->i_lock); 1279 spin_lock(&ci->i_ceph_lock);
1279 goto retry; 1280 goto retry;
1280 } 1281 }
1281 1282
@@ -1285,7 +1286,7 @@ retry:
1285 list_del_init(&capsnap->flushing_item); 1286 list_del_init(&capsnap->flushing_item);
1286 list_add_tail(&capsnap->flushing_item, 1287 list_add_tail(&capsnap->flushing_item,
1287 &session->s_cap_snaps_flushing); 1288 &session->s_cap_snaps_flushing);
1288 spin_unlock(&inode->i_lock); 1289 spin_unlock(&ci->i_ceph_lock);
1289 1290
1290 dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n", 1291 dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
1291 inode, capsnap, capsnap->follows, capsnap->flush_tid); 1292 inode, capsnap, capsnap->follows, capsnap->flush_tid);
@@ -1302,7 +1303,7 @@ retry:
1302 next_follows = capsnap->follows + 1; 1303 next_follows = capsnap->follows + 1;
1303 ceph_put_cap_snap(capsnap); 1304 ceph_put_cap_snap(capsnap);
1304 1305
1305 spin_lock(&inode->i_lock); 1306 spin_lock(&ci->i_ceph_lock);
1306 goto retry; 1307 goto retry;
1307 } 1308 }
1308 1309
@@ -1322,11 +1323,9 @@ out:
1322 1323
1323static void ceph_flush_snaps(struct ceph_inode_info *ci) 1324static void ceph_flush_snaps(struct ceph_inode_info *ci)
1324{ 1325{
1325 struct inode *inode = &ci->vfs_inode; 1326 spin_lock(&ci->i_ceph_lock);
1326
1327 spin_lock(&inode->i_lock);
1328 __ceph_flush_snaps(ci, NULL, 0); 1327 __ceph_flush_snaps(ci, NULL, 0);
1329 spin_unlock(&inode->i_lock); 1328 spin_unlock(&ci->i_ceph_lock);
1330} 1329}
1331 1330
1332/* 1331/*
@@ -1373,7 +1372,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
1373 * Add dirty inode to the flushing list. Assigned a seq number so we 1372 * Add dirty inode to the flushing list. Assigned a seq number so we
1374 * can wait for caps to flush without starving. 1373 * can wait for caps to flush without starving.
1375 * 1374 *
1376 * Called under i_lock. 1375 * Called under i_ceph_lock.
1377 */ 1376 */
1378static int __mark_caps_flushing(struct inode *inode, 1377static int __mark_caps_flushing(struct inode *inode,
1379 struct ceph_mds_session *session) 1378 struct ceph_mds_session *session)
@@ -1421,9 +1420,9 @@ static int try_nonblocking_invalidate(struct inode *inode)
1421 struct ceph_inode_info *ci = ceph_inode(inode); 1420 struct ceph_inode_info *ci = ceph_inode(inode);
1422 u32 invalidating_gen = ci->i_rdcache_gen; 1421 u32 invalidating_gen = ci->i_rdcache_gen;
1423 1422
1424 spin_unlock(&inode->i_lock); 1423 spin_unlock(&ci->i_ceph_lock);
1425 invalidate_mapping_pages(&inode->i_data, 0, -1); 1424 invalidate_mapping_pages(&inode->i_data, 0, -1);
1426 spin_lock(&inode->i_lock); 1425 spin_lock(&ci->i_ceph_lock);
1427 1426
1428 if (inode->i_data.nrpages == 0 && 1427 if (inode->i_data.nrpages == 0 &&
1429 invalidating_gen == ci->i_rdcache_gen) { 1428 invalidating_gen == ci->i_rdcache_gen) {
@@ -1470,7 +1469,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1470 if (mdsc->stopping) 1469 if (mdsc->stopping)
1471 is_delayed = 1; 1470 is_delayed = 1;
1472 1471
1473 spin_lock(&inode->i_lock); 1472 spin_lock(&ci->i_ceph_lock);
1474 1473
1475 if (ci->i_ceph_flags & CEPH_I_FLUSH) 1474 if (ci->i_ceph_flags & CEPH_I_FLUSH)
1476 flags |= CHECK_CAPS_FLUSH; 1475 flags |= CHECK_CAPS_FLUSH;
@@ -1480,7 +1479,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1480 __ceph_flush_snaps(ci, &session, 0); 1479 __ceph_flush_snaps(ci, &session, 0);
1481 goto retry_locked; 1480 goto retry_locked;
1482retry: 1481retry:
1483 spin_lock(&inode->i_lock); 1482 spin_lock(&ci->i_ceph_lock);
1484retry_locked: 1483retry_locked:
1485 file_wanted = __ceph_caps_file_wanted(ci); 1484 file_wanted = __ceph_caps_file_wanted(ci);
1486 used = __ceph_caps_used(ci); 1485 used = __ceph_caps_used(ci);
@@ -1634,7 +1633,7 @@ ack:
1634 if (mutex_trylock(&session->s_mutex) == 0) { 1633 if (mutex_trylock(&session->s_mutex) == 0) {
1635 dout("inverting session/ino locks on %p\n", 1634 dout("inverting session/ino locks on %p\n",
1636 session); 1635 session);
1637 spin_unlock(&inode->i_lock); 1636 spin_unlock(&ci->i_ceph_lock);
1638 if (took_snap_rwsem) { 1637 if (took_snap_rwsem) {
1639 up_read(&mdsc->snap_rwsem); 1638 up_read(&mdsc->snap_rwsem);
1640 took_snap_rwsem = 0; 1639 took_snap_rwsem = 0;
@@ -1648,7 +1647,7 @@ ack:
1648 if (down_read_trylock(&mdsc->snap_rwsem) == 0) { 1647 if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
1649 dout("inverting snap/in locks on %p\n", 1648 dout("inverting snap/in locks on %p\n",
1650 inode); 1649 inode);
1651 spin_unlock(&inode->i_lock); 1650 spin_unlock(&ci->i_ceph_lock);
1652 down_read(&mdsc->snap_rwsem); 1651 down_read(&mdsc->snap_rwsem);
1653 took_snap_rwsem = 1; 1652 took_snap_rwsem = 1;
1654 goto retry; 1653 goto retry;
@@ -1664,10 +1663,10 @@ ack:
1664 mds = cap->mds; /* remember mds, so we don't repeat */ 1663 mds = cap->mds; /* remember mds, so we don't repeat */
1665 sent++; 1664 sent++;
1666 1665
1667 /* __send_cap drops i_lock */ 1666 /* __send_cap drops i_ceph_lock */
1668 delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want, 1667 delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
1669 retain, flushing, NULL); 1668 retain, flushing, NULL);
1670 goto retry; /* retake i_lock and restart our cap scan. */ 1669 goto retry; /* retake i_ceph_lock and restart our cap scan. */
1671 } 1670 }
1672 1671
1673 /* 1672 /*
@@ -1681,7 +1680,7 @@ ack:
1681 else if (!is_delayed || force_requeue) 1680 else if (!is_delayed || force_requeue)
1682 __cap_delay_requeue(mdsc, ci); 1681 __cap_delay_requeue(mdsc, ci);
1683 1682
1684 spin_unlock(&inode->i_lock); 1683 spin_unlock(&ci->i_ceph_lock);
1685 1684
1686 if (queue_invalidate) 1685 if (queue_invalidate)
1687 ceph_queue_invalidate(inode); 1686 ceph_queue_invalidate(inode);
@@ -1704,7 +1703,7 @@ static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
1704 int flushing = 0; 1703 int flushing = 0;
1705 1704
1706retry: 1705retry:
1707 spin_lock(&inode->i_lock); 1706 spin_lock(&ci->i_ceph_lock);
1708 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) { 1707 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1709 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode); 1708 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1710 goto out; 1709 goto out;
@@ -1716,7 +1715,7 @@ retry:
1716 int delayed; 1715 int delayed;
1717 1716
1718 if (!session) { 1717 if (!session) {
1719 spin_unlock(&inode->i_lock); 1718 spin_unlock(&ci->i_ceph_lock);
1720 session = cap->session; 1719 session = cap->session;
1721 mutex_lock(&session->s_mutex); 1720 mutex_lock(&session->s_mutex);
1722 goto retry; 1721 goto retry;
@@ -1727,18 +1726,18 @@ retry:
1727 1726
1728 flushing = __mark_caps_flushing(inode, session); 1727 flushing = __mark_caps_flushing(inode, session);
1729 1728
1730 /* __send_cap drops i_lock */ 1729 /* __send_cap drops i_ceph_lock */
1731 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want, 1730 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
1732 cap->issued | cap->implemented, flushing, 1731 cap->issued | cap->implemented, flushing,
1733 flush_tid); 1732 flush_tid);
1734 if (!delayed) 1733 if (!delayed)
1735 goto out_unlocked; 1734 goto out_unlocked;
1736 1735
1737 spin_lock(&inode->i_lock); 1736 spin_lock(&ci->i_ceph_lock);
1738 __cap_delay_requeue(mdsc, ci); 1737 __cap_delay_requeue(mdsc, ci);
1739 } 1738 }
1740out: 1739out:
1741 spin_unlock(&inode->i_lock); 1740 spin_unlock(&ci->i_ceph_lock);
1742out_unlocked: 1741out_unlocked:
1743 if (session && unlock_session) 1742 if (session && unlock_session)
1744 mutex_unlock(&session->s_mutex); 1743 mutex_unlock(&session->s_mutex);
@@ -1753,7 +1752,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
1753 struct ceph_inode_info *ci = ceph_inode(inode); 1752 struct ceph_inode_info *ci = ceph_inode(inode);
1754 int i, ret = 1; 1753 int i, ret = 1;
1755 1754
1756 spin_lock(&inode->i_lock); 1755 spin_lock(&ci->i_ceph_lock);
1757 for (i = 0; i < CEPH_CAP_BITS; i++) 1756 for (i = 0; i < CEPH_CAP_BITS; i++)
1758 if ((ci->i_flushing_caps & (1 << i)) && 1757 if ((ci->i_flushing_caps & (1 << i)) &&
1759 ci->i_cap_flush_tid[i] <= tid) { 1758 ci->i_cap_flush_tid[i] <= tid) {
@@ -1761,7 +1760,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
1761 ret = 0; 1760 ret = 0;
1762 break; 1761 break;
1763 } 1762 }
1764 spin_unlock(&inode->i_lock); 1763 spin_unlock(&ci->i_ceph_lock);
1765 return ret; 1764 return ret;
1766} 1765}
1767 1766
@@ -1868,10 +1867,10 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
1868 struct ceph_mds_client *mdsc = 1867 struct ceph_mds_client *mdsc =
1869 ceph_sb_to_client(inode->i_sb)->mdsc; 1868 ceph_sb_to_client(inode->i_sb)->mdsc;
1870 1869
1871 spin_lock(&inode->i_lock); 1870 spin_lock(&ci->i_ceph_lock);
1872 if (__ceph_caps_dirty(ci)) 1871 if (__ceph_caps_dirty(ci))
1873 __cap_delay_requeue_front(mdsc, ci); 1872 __cap_delay_requeue_front(mdsc, ci);
1874 spin_unlock(&inode->i_lock); 1873 spin_unlock(&ci->i_ceph_lock);
1875 } 1874 }
1876 return err; 1875 return err;
1877} 1876}
@@ -1894,7 +1893,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1894 struct inode *inode = &ci->vfs_inode; 1893 struct inode *inode = &ci->vfs_inode;
1895 struct ceph_cap *cap; 1894 struct ceph_cap *cap;
1896 1895
1897 spin_lock(&inode->i_lock); 1896 spin_lock(&ci->i_ceph_lock);
1898 cap = ci->i_auth_cap; 1897 cap = ci->i_auth_cap;
1899 if (cap && cap->session == session) { 1898 if (cap && cap->session == session) {
1900 dout("kick_flushing_caps %p cap %p capsnap %p\n", inode, 1899 dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
@@ -1904,7 +1903,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1904 pr_err("%p auth cap %p not mds%d ???\n", inode, 1903 pr_err("%p auth cap %p not mds%d ???\n", inode,
1905 cap, session->s_mds); 1904 cap, session->s_mds);
1906 } 1905 }
1907 spin_unlock(&inode->i_lock); 1906 spin_unlock(&ci->i_ceph_lock);
1908 } 1907 }
1909} 1908}
1910 1909
@@ -1921,7 +1920,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
1921 struct ceph_cap *cap; 1920 struct ceph_cap *cap;
1922 int delayed = 0; 1921 int delayed = 0;
1923 1922
1924 spin_lock(&inode->i_lock); 1923 spin_lock(&ci->i_ceph_lock);
1925 cap = ci->i_auth_cap; 1924 cap = ci->i_auth_cap;
1926 if (cap && cap->session == session) { 1925 if (cap && cap->session == session) {
1927 dout("kick_flushing_caps %p cap %p %s\n", inode, 1926 dout("kick_flushing_caps %p cap %p %s\n", inode,
@@ -1932,14 +1931,14 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
1932 cap->issued | cap->implemented, 1931 cap->issued | cap->implemented,
1933 ci->i_flushing_caps, NULL); 1932 ci->i_flushing_caps, NULL);
1934 if (delayed) { 1933 if (delayed) {
1935 spin_lock(&inode->i_lock); 1934 spin_lock(&ci->i_ceph_lock);
1936 __cap_delay_requeue(mdsc, ci); 1935 __cap_delay_requeue(mdsc, ci);
1937 spin_unlock(&inode->i_lock); 1936 spin_unlock(&ci->i_ceph_lock);
1938 } 1937 }
1939 } else { 1938 } else {
1940 pr_err("%p auth cap %p not mds%d ???\n", inode, 1939 pr_err("%p auth cap %p not mds%d ???\n", inode,
1941 cap, session->s_mds); 1940 cap, session->s_mds);
1942 spin_unlock(&inode->i_lock); 1941 spin_unlock(&ci->i_ceph_lock);
1943 } 1942 }
1944 } 1943 }
1945} 1944}
@@ -1952,7 +1951,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
1952 struct ceph_cap *cap; 1951 struct ceph_cap *cap;
1953 int delayed = 0; 1952 int delayed = 0;
1954 1953
1955 spin_lock(&inode->i_lock); 1954 spin_lock(&ci->i_ceph_lock);
1956 cap = ci->i_auth_cap; 1955 cap = ci->i_auth_cap;
1957 dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode, 1956 dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
1958 ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq); 1957 ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
@@ -1964,12 +1963,12 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
1964 cap->issued | cap->implemented, 1963 cap->issued | cap->implemented,
1965 ci->i_flushing_caps, NULL); 1964 ci->i_flushing_caps, NULL);
1966 if (delayed) { 1965 if (delayed) {
1967 spin_lock(&inode->i_lock); 1966 spin_lock(&ci->i_ceph_lock);
1968 __cap_delay_requeue(mdsc, ci); 1967 __cap_delay_requeue(mdsc, ci);
1969 spin_unlock(&inode->i_lock); 1968 spin_unlock(&ci->i_ceph_lock);
1970 } 1969 }
1971 } else { 1970 } else {
1972 spin_unlock(&inode->i_lock); 1971 spin_unlock(&ci->i_ceph_lock);
1973 } 1972 }
1974} 1973}
1975 1974
@@ -1978,7 +1977,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
1978 * Take references to capabilities we hold, so that we don't release 1977 * Take references to capabilities we hold, so that we don't release
1979 * them to the MDS prematurely. 1978 * them to the MDS prematurely.
1980 * 1979 *
1981 * Protected by i_lock. 1980 * Protected by i_ceph_lock.
1982 */ 1981 */
1983static void __take_cap_refs(struct ceph_inode_info *ci, int got) 1982static void __take_cap_refs(struct ceph_inode_info *ci, int got)
1984{ 1983{
@@ -2016,7 +2015,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2016 2015
2017 dout("get_cap_refs %p need %s want %s\n", inode, 2016 dout("get_cap_refs %p need %s want %s\n", inode,
2018 ceph_cap_string(need), ceph_cap_string(want)); 2017 ceph_cap_string(need), ceph_cap_string(want));
2019 spin_lock(&inode->i_lock); 2018 spin_lock(&ci->i_ceph_lock);
2020 2019
2021 /* make sure file is actually open */ 2020 /* make sure file is actually open */
2022 file_wanted = __ceph_caps_file_wanted(ci); 2021 file_wanted = __ceph_caps_file_wanted(ci);
@@ -2077,7 +2076,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2077 ceph_cap_string(have), ceph_cap_string(need)); 2076 ceph_cap_string(have), ceph_cap_string(need));
2078 } 2077 }
2079out: 2078out:
2080 spin_unlock(&inode->i_lock); 2079 spin_unlock(&ci->i_ceph_lock);
2081 dout("get_cap_refs %p ret %d got %s\n", inode, 2080 dout("get_cap_refs %p ret %d got %s\n", inode,
2082 ret, ceph_cap_string(*got)); 2081 ret, ceph_cap_string(*got));
2083 return ret; 2082 return ret;
@@ -2094,7 +2093,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
2094 int check = 0; 2093 int check = 0;
2095 2094
2096 /* do we need to explicitly request a larger max_size? */ 2095 /* do we need to explicitly request a larger max_size? */
2097 spin_lock(&inode->i_lock); 2096 spin_lock(&ci->i_ceph_lock);
2098 if ((endoff >= ci->i_max_size || 2097 if ((endoff >= ci->i_max_size ||
2099 endoff > (inode->i_size << 1)) && 2098 endoff > (inode->i_size << 1)) &&
2100 endoff > ci->i_wanted_max_size) { 2099 endoff > ci->i_wanted_max_size) {
@@ -2103,7 +2102,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
2103 ci->i_wanted_max_size = endoff; 2102 ci->i_wanted_max_size = endoff;
2104 check = 1; 2103 check = 1;
2105 } 2104 }
2106 spin_unlock(&inode->i_lock); 2105 spin_unlock(&ci->i_ceph_lock);
2107 if (check) 2106 if (check)
2108 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 2107 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2109} 2108}
@@ -2140,9 +2139,9 @@ retry:
2140 */ 2139 */
2141void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps) 2140void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2142{ 2141{
2143 spin_lock(&ci->vfs_inode.i_lock); 2142 spin_lock(&ci->i_ceph_lock);
2144 __take_cap_refs(ci, caps); 2143 __take_cap_refs(ci, caps);
2145 spin_unlock(&ci->vfs_inode.i_lock); 2144 spin_unlock(&ci->i_ceph_lock);
2146} 2145}
2147 2146
2148/* 2147/*
@@ -2160,7 +2159,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2160 int last = 0, put = 0, flushsnaps = 0, wake = 0; 2159 int last = 0, put = 0, flushsnaps = 0, wake = 0;
2161 struct ceph_cap_snap *capsnap; 2160 struct ceph_cap_snap *capsnap;
2162 2161
2163 spin_lock(&inode->i_lock); 2162 spin_lock(&ci->i_ceph_lock);
2164 if (had & CEPH_CAP_PIN) 2163 if (had & CEPH_CAP_PIN)
2165 --ci->i_pin_ref; 2164 --ci->i_pin_ref;
2166 if (had & CEPH_CAP_FILE_RD) 2165 if (had & CEPH_CAP_FILE_RD)
@@ -2193,7 +2192,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2193 } 2192 }
2194 } 2193 }
2195 } 2194 }
2196 spin_unlock(&inode->i_lock); 2195 spin_unlock(&ci->i_ceph_lock);
2197 2196
2198 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had), 2197 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2199 last ? " last" : "", put ? " put" : ""); 2198 last ? " last" : "", put ? " put" : "");
@@ -2225,7 +2224,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2225 int found = 0; 2224 int found = 0;
2226 struct ceph_cap_snap *capsnap = NULL; 2225 struct ceph_cap_snap *capsnap = NULL;
2227 2226
2228 spin_lock(&inode->i_lock); 2227 spin_lock(&ci->i_ceph_lock);
2229 ci->i_wrbuffer_ref -= nr; 2228 ci->i_wrbuffer_ref -= nr;
2230 last = !ci->i_wrbuffer_ref; 2229 last = !ci->i_wrbuffer_ref;
2231 2230
@@ -2274,7 +2273,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2274 } 2273 }
2275 } 2274 }
2276 2275
2277 spin_unlock(&inode->i_lock); 2276 spin_unlock(&ci->i_ceph_lock);
2278 2277
2279 if (last) { 2278 if (last) {
2280 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 2279 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -2291,7 +2290,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2291 * Handle a cap GRANT message from the MDS. (Note that a GRANT may 2290 * Handle a cap GRANT message from the MDS. (Note that a GRANT may
2292 * actually be a revocation if it specifies a smaller cap set.) 2291 * actually be a revocation if it specifies a smaller cap set.)
2293 * 2292 *
2294 * caller holds s_mutex and i_lock, we drop both. 2293 * caller holds s_mutex and i_ceph_lock, we drop both.
2295 * 2294 *
2296 * return value: 2295 * return value:
2297 * 0 - ok 2296 * 0 - ok
@@ -2302,7 +2301,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2302 struct ceph_mds_session *session, 2301 struct ceph_mds_session *session,
2303 struct ceph_cap *cap, 2302 struct ceph_cap *cap,
2304 struct ceph_buffer *xattr_buf) 2303 struct ceph_buffer *xattr_buf)
2305 __releases(inode->i_lock) 2304 __releases(ci->i_ceph_lock)
2306{ 2305{
2307 struct ceph_inode_info *ci = ceph_inode(inode); 2306 struct ceph_inode_info *ci = ceph_inode(inode);
2308 int mds = session->s_mds; 2307 int mds = session->s_mds;
@@ -2453,7 +2452,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2453 } 2452 }
2454 BUG_ON(cap->issued & ~cap->implemented); 2453 BUG_ON(cap->issued & ~cap->implemented);
2455 2454
2456 spin_unlock(&inode->i_lock); 2455 spin_unlock(&ci->i_ceph_lock);
2457 if (writeback) 2456 if (writeback)
2458 /* 2457 /*
2459 * queue inode for writeback: we can't actually call 2458 * queue inode for writeback: we can't actually call
@@ -2483,7 +2482,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2483 struct ceph_mds_caps *m, 2482 struct ceph_mds_caps *m,
2484 struct ceph_mds_session *session, 2483 struct ceph_mds_session *session,
2485 struct ceph_cap *cap) 2484 struct ceph_cap *cap)
2486 __releases(inode->i_lock) 2485 __releases(ci->i_ceph_lock)
2487{ 2486{
2488 struct ceph_inode_info *ci = ceph_inode(inode); 2487 struct ceph_inode_info *ci = ceph_inode(inode);
2489 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 2488 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -2539,7 +2538,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2539 wake_up_all(&ci->i_cap_wq); 2538 wake_up_all(&ci->i_cap_wq);
2540 2539
2541out: 2540out:
2542 spin_unlock(&inode->i_lock); 2541 spin_unlock(&ci->i_ceph_lock);
2543 if (drop) 2542 if (drop)
2544 iput(inode); 2543 iput(inode);
2545} 2544}
@@ -2562,7 +2561,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2562 dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n", 2561 dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
2563 inode, ci, session->s_mds, follows); 2562 inode, ci, session->s_mds, follows);
2564 2563
2565 spin_lock(&inode->i_lock); 2564 spin_lock(&ci->i_ceph_lock);
2566 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 2565 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2567 if (capsnap->follows == follows) { 2566 if (capsnap->follows == follows) {
2568 if (capsnap->flush_tid != flush_tid) { 2567 if (capsnap->flush_tid != flush_tid) {
@@ -2585,7 +2584,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2585 capsnap, capsnap->follows); 2584 capsnap, capsnap->follows);
2586 } 2585 }
2587 } 2586 }
2588 spin_unlock(&inode->i_lock); 2587 spin_unlock(&ci->i_ceph_lock);
2589 if (drop) 2588 if (drop)
2590 iput(inode); 2589 iput(inode);
2591} 2590}
@@ -2598,7 +2597,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2598static void handle_cap_trunc(struct inode *inode, 2597static void handle_cap_trunc(struct inode *inode,
2599 struct ceph_mds_caps *trunc, 2598 struct ceph_mds_caps *trunc,
2600 struct ceph_mds_session *session) 2599 struct ceph_mds_session *session)
2601 __releases(inode->i_lock) 2600 __releases(ci->i_ceph_lock)
2602{ 2601{
2603 struct ceph_inode_info *ci = ceph_inode(inode); 2602 struct ceph_inode_info *ci = ceph_inode(inode);
2604 int mds = session->s_mds; 2603 int mds = session->s_mds;
@@ -2617,7 +2616,7 @@ static void handle_cap_trunc(struct inode *inode,
2617 inode, mds, seq, truncate_size, truncate_seq); 2616 inode, mds, seq, truncate_size, truncate_seq);
2618 queue_trunc = ceph_fill_file_size(inode, issued, 2617 queue_trunc = ceph_fill_file_size(inode, issued,
2619 truncate_seq, truncate_size, size); 2618 truncate_seq, truncate_size, size);
2620 spin_unlock(&inode->i_lock); 2619 spin_unlock(&ci->i_ceph_lock);
2621 2620
2622 if (queue_trunc) 2621 if (queue_trunc)
2623 ceph_queue_vmtruncate(inode); 2622 ceph_queue_vmtruncate(inode);
@@ -2646,7 +2645,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2646 dout("handle_cap_export inode %p ci %p mds%d mseq %d\n", 2645 dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
2647 inode, ci, mds, mseq); 2646 inode, ci, mds, mseq);
2648 2647
2649 spin_lock(&inode->i_lock); 2648 spin_lock(&ci->i_ceph_lock);
2650 2649
2651 /* make sure we haven't seen a higher mseq */ 2650 /* make sure we haven't seen a higher mseq */
2652 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { 2651 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
@@ -2690,7 +2689,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2690 } 2689 }
2691 /* else, we already released it */ 2690 /* else, we already released it */
2692 2691
2693 spin_unlock(&inode->i_lock); 2692 spin_unlock(&ci->i_ceph_lock);
2694} 2693}
2695 2694
2696/* 2695/*
@@ -2745,9 +2744,9 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
2745 up_read(&mdsc->snap_rwsem); 2744 up_read(&mdsc->snap_rwsem);
2746 2745
2747 /* make sure we re-request max_size, if necessary */ 2746 /* make sure we re-request max_size, if necessary */
2748 spin_lock(&inode->i_lock); 2747 spin_lock(&ci->i_ceph_lock);
2749 ci->i_requested_max_size = 0; 2748 ci->i_requested_max_size = 0;
2750 spin_unlock(&inode->i_lock); 2749 spin_unlock(&ci->i_ceph_lock);
2751} 2750}
2752 2751
2753/* 2752/*
@@ -2762,6 +2761,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2762 struct ceph_mds_client *mdsc = session->s_mdsc; 2761 struct ceph_mds_client *mdsc = session->s_mdsc;
2763 struct super_block *sb = mdsc->fsc->sb; 2762 struct super_block *sb = mdsc->fsc->sb;
2764 struct inode *inode; 2763 struct inode *inode;
2764 struct ceph_inode_info *ci;
2765 struct ceph_cap *cap; 2765 struct ceph_cap *cap;
2766 struct ceph_mds_caps *h; 2766 struct ceph_mds_caps *h;
2767 int mds = session->s_mds; 2767 int mds = session->s_mds;
@@ -2815,6 +2815,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2815 2815
2816 /* lookup ino */ 2816 /* lookup ino */
2817 inode = ceph_find_inode(sb, vino); 2817 inode = ceph_find_inode(sb, vino);
2818 ci = ceph_inode(inode);
2818 dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino, 2819 dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
2819 vino.snap, inode); 2820 vino.snap, inode);
2820 if (!inode) { 2821 if (!inode) {
@@ -2844,16 +2845,16 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2844 } 2845 }
2845 2846
2846 /* the rest require a cap */ 2847 /* the rest require a cap */
2847 spin_lock(&inode->i_lock); 2848 spin_lock(&ci->i_ceph_lock);
2848 cap = __get_cap_for_mds(ceph_inode(inode), mds); 2849 cap = __get_cap_for_mds(ceph_inode(inode), mds);
2849 if (!cap) { 2850 if (!cap) {
2850 dout(" no cap on %p ino %llx.%llx from mds%d\n", 2851 dout(" no cap on %p ino %llx.%llx from mds%d\n",
2851 inode, ceph_ino(inode), ceph_snap(inode), mds); 2852 inode, ceph_ino(inode), ceph_snap(inode), mds);
2852 spin_unlock(&inode->i_lock); 2853 spin_unlock(&ci->i_ceph_lock);
2853 goto flush_cap_releases; 2854 goto flush_cap_releases;
2854 } 2855 }
2855 2856
2856 /* note that each of these drops i_lock for us */ 2857 /* note that each of these drops i_ceph_lock for us */
2857 switch (op) { 2858 switch (op) {
2858 case CEPH_CAP_OP_REVOKE: 2859 case CEPH_CAP_OP_REVOKE:
2859 case CEPH_CAP_OP_GRANT: 2860 case CEPH_CAP_OP_GRANT:
@@ -2869,7 +2870,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2869 break; 2870 break;
2870 2871
2871 default: 2872 default:
2872 spin_unlock(&inode->i_lock); 2873 spin_unlock(&ci->i_ceph_lock);
2873 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op, 2874 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
2874 ceph_cap_op_name(op)); 2875 ceph_cap_op_name(op));
2875 } 2876 }
@@ -2962,13 +2963,13 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
2962 struct inode *inode = &ci->vfs_inode; 2963 struct inode *inode = &ci->vfs_inode;
2963 int last = 0; 2964 int last = 0;
2964 2965
2965 spin_lock(&inode->i_lock); 2966 spin_lock(&ci->i_ceph_lock);
2966 dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode, 2967 dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
2967 ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1); 2968 ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
2968 BUG_ON(ci->i_nr_by_mode[fmode] == 0); 2969 BUG_ON(ci->i_nr_by_mode[fmode] == 0);
2969 if (--ci->i_nr_by_mode[fmode] == 0) 2970 if (--ci->i_nr_by_mode[fmode] == 0)
2970 last++; 2971 last++;
2971 spin_unlock(&inode->i_lock); 2972 spin_unlock(&ci->i_ceph_lock);
2972 2973
2973 if (last && ci->i_vino.snap == CEPH_NOSNAP) 2974 if (last && ci->i_vino.snap == CEPH_NOSNAP)
2974 ceph_check_caps(ci, 0, NULL); 2975 ceph_check_caps(ci, 0, NULL);
@@ -2991,7 +2992,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
2991 int used, dirty; 2992 int used, dirty;
2992 int ret = 0; 2993 int ret = 0;
2993 2994
2994 spin_lock(&inode->i_lock); 2995 spin_lock(&ci->i_ceph_lock);
2995 used = __ceph_caps_used(ci); 2996 used = __ceph_caps_used(ci);
2996 dirty = __ceph_caps_dirty(ci); 2997 dirty = __ceph_caps_dirty(ci);
2997 2998
@@ -3046,7 +3047,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
3046 inode, cap, ceph_cap_string(cap->issued)); 3047 inode, cap, ceph_cap_string(cap->issued));
3047 } 3048 }
3048 } 3049 }
3049 spin_unlock(&inode->i_lock); 3050 spin_unlock(&ci->i_ceph_lock);
3050 return ret; 3051 return ret;
3051} 3052}
3052 3053
@@ -3061,7 +3062,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
3061 3062
3062 /* 3063 /*
 3063 * force a record for the directory caps if we have a dentry lease. 3064 * force a record for the directory caps if we have a dentry lease.
3064 * this is racy (can't take i_lock and d_lock together), but it 3065 * this is racy (can't take i_ceph_lock and d_lock together), but it
3065 * doesn't have to be perfect; the mds will revoke anything we don't 3066 * doesn't have to be perfect; the mds will revoke anything we don't
3066 * release. 3067 * release.
3067 */ 3068 */
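
Every fs/ceph hunk above is part of one mechanical conversion: ceph-private per-inode state (caps, snap contexts, writeback refs) moves from the shared VFS inode->i_lock to a dedicated ci->i_ceph_lock, which the inode.c hunk further down initializes in ceph_alloc_inode(). A userspace analogue of the refactor, using a pthread mutex to stand in for the kernel spinlock (field and function names are illustrative only; build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-in for struct inode; i_lock stays for generic VFS use. */
    struct inode {
        pthread_mutex_t i_lock;
    };

    /* Ceph-private info: its own fields, now under its own lock. */
    struct ceph_inode_info {
        struct inode vfs_inode;
        pthread_mutex_t i_ceph_lock;    /* stands in for the new spinlock */
        int i_wrbuffer_ref;
    };

    static void ceph_inode_init(struct ceph_inode_info *ci)
    {
        pthread_mutex_init(&ci->vfs_inode.i_lock, NULL);
        pthread_mutex_init(&ci->i_ceph_lock, NULL); /* like ceph_alloc_inode() */
        ci->i_wrbuffer_ref = 0;
    }

    /* Before: lock inode->i_lock; after: lock ci->i_ceph_lock. */
    static void take_wrbuffer_ref(struct ceph_inode_info *ci)
    {
        pthread_mutex_lock(&ci->i_ceph_lock);
        ci->i_wrbuffer_ref++;
        pthread_mutex_unlock(&ci->i_ceph_lock);
    }

    int main(void)
    {
        struct ceph_inode_info ci;

        ceph_inode_init(&ci);
        take_wrbuffer_ref(&ci);
        printf("wrbuffer refs: %d\n", ci.i_wrbuffer_ref);
        return 0;
    }
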
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 2abd0dfad7f8..3eeb97661262 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -281,18 +281,18 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
281 } 281 }
282 282
283 /* can we use the dcache? */ 283 /* can we use the dcache? */
284 spin_lock(&inode->i_lock); 284 spin_lock(&ci->i_ceph_lock);
285 if ((filp->f_pos == 2 || fi->dentry) && 285 if ((filp->f_pos == 2 || fi->dentry) &&
286 !ceph_test_mount_opt(fsc, NOASYNCREADDIR) && 286 !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
287 ceph_snap(inode) != CEPH_SNAPDIR && 287 ceph_snap(inode) != CEPH_SNAPDIR &&
288 ceph_dir_test_complete(inode) && 288 ceph_dir_test_complete(inode) &&
289 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { 289 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
290 spin_unlock(&inode->i_lock); 290 spin_unlock(&ci->i_ceph_lock);
291 err = __dcache_readdir(filp, dirent, filldir); 291 err = __dcache_readdir(filp, dirent, filldir);
292 if (err != -EAGAIN) 292 if (err != -EAGAIN)
293 return err; 293 return err;
294 } else { 294 } else {
295 spin_unlock(&inode->i_lock); 295 spin_unlock(&ci->i_ceph_lock);
296 } 296 }
297 if (fi->dentry) { 297 if (fi->dentry) {
298 err = note_last_dentry(fi, fi->dentry->d_name.name, 298 err = note_last_dentry(fi, fi->dentry->d_name.name,
@@ -428,12 +428,12 @@ more:
428 * were released during the whole readdir, and we should have 428 * were released during the whole readdir, and we should have
429 * the complete dir contents in our cache. 429 * the complete dir contents in our cache.
430 */ 430 */
431 spin_lock(&inode->i_lock); 431 spin_lock(&ci->i_ceph_lock);
432 if (ci->i_release_count == fi->dir_release_count) { 432 if (ci->i_release_count == fi->dir_release_count) {
433 ceph_dir_set_complete(inode); 433 ceph_dir_set_complete(inode);
434 ci->i_max_offset = filp->f_pos; 434 ci->i_max_offset = filp->f_pos;
435 } 435 }
436 spin_unlock(&inode->i_lock); 436 spin_unlock(&ci->i_ceph_lock);
437 437
438 dout("readdir %p filp %p done.\n", inode, filp); 438 dout("readdir %p filp %p done.\n", inode, filp);
439 return 0; 439 return 0;
@@ -607,7 +607,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
607 struct ceph_inode_info *ci = ceph_inode(dir); 607 struct ceph_inode_info *ci = ceph_inode(dir);
608 struct ceph_dentry_info *di = ceph_dentry(dentry); 608 struct ceph_dentry_info *di = ceph_dentry(dentry);
609 609
610 spin_lock(&dir->i_lock); 610 spin_lock(&ci->i_ceph_lock);
611 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags); 611 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
612 if (strncmp(dentry->d_name.name, 612 if (strncmp(dentry->d_name.name,
613 fsc->mount_options->snapdir_name, 613 fsc->mount_options->snapdir_name,
@@ -615,13 +615,13 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
615 !is_root_ceph_dentry(dir, dentry) && 615 !is_root_ceph_dentry(dir, dentry) &&
616 ceph_dir_test_complete(dir) && 616 ceph_dir_test_complete(dir) &&
617 (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) { 617 (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
618 spin_unlock(&dir->i_lock); 618 spin_unlock(&ci->i_ceph_lock);
619 dout(" dir %p complete, -ENOENT\n", dir); 619 dout(" dir %p complete, -ENOENT\n", dir);
620 d_add(dentry, NULL); 620 d_add(dentry, NULL);
621 di->lease_shared_gen = ci->i_shared_gen; 621 di->lease_shared_gen = ci->i_shared_gen;
622 return NULL; 622 return NULL;
623 } 623 }
624 spin_unlock(&dir->i_lock); 624 spin_unlock(&ci->i_ceph_lock);
625 } 625 }
626 626
627 op = ceph_snap(dir) == CEPH_SNAPDIR ? 627 op = ceph_snap(dir) == CEPH_SNAPDIR ?
@@ -841,12 +841,12 @@ static int drop_caps_for_unlink(struct inode *inode)
841 struct ceph_inode_info *ci = ceph_inode(inode); 841 struct ceph_inode_info *ci = ceph_inode(inode);
842 int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; 842 int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
843 843
844 spin_lock(&inode->i_lock); 844 spin_lock(&ci->i_ceph_lock);
845 if (inode->i_nlink == 1) { 845 if (inode->i_nlink == 1) {
846 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN); 846 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
847 ci->i_ceph_flags |= CEPH_I_NODELAY; 847 ci->i_ceph_flags |= CEPH_I_NODELAY;
848 } 848 }
849 spin_unlock(&inode->i_lock); 849 spin_unlock(&ci->i_ceph_lock);
850 return drop; 850 return drop;
851} 851}
852 852
@@ -1015,10 +1015,10 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
1015 struct ceph_dentry_info *di = ceph_dentry(dentry); 1015 struct ceph_dentry_info *di = ceph_dentry(dentry);
1016 int valid = 0; 1016 int valid = 0;
1017 1017
1018 spin_lock(&dir->i_lock); 1018 spin_lock(&ci->i_ceph_lock);
1019 if (ci->i_shared_gen == di->lease_shared_gen) 1019 if (ci->i_shared_gen == di->lease_shared_gen)
1020 valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1); 1020 valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
1021 spin_unlock(&dir->i_lock); 1021 spin_unlock(&ci->i_ceph_lock);
1022 dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n", 1022 dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
1023 dir, (unsigned)ci->i_shared_gen, dentry, 1023 dir, (unsigned)ci->i_shared_gen, dentry,
1024 (unsigned)di->lease_shared_gen, valid); 1024 (unsigned)di->lease_shared_gen, valid);
@@ -1143,7 +1143,7 @@ static void ceph_d_prune(struct dentry *dentry)
1143{ 1143{
1144 struct ceph_dentry_info *di; 1144 struct ceph_dentry_info *di;
1145 1145
1146 dout("d_release %p\n", dentry); 1146 dout("ceph_d_prune %p\n", dentry);
1147 1147
1148 /* do we have a valid parent? */ 1148 /* do we have a valid parent? */
1149 if (!dentry->d_parent || IS_ROOT(dentry)) 1149 if (!dentry->d_parent || IS_ROOT(dentry))
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ce549d31eeb7..ed72428d9c75 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -147,9 +147,9 @@ int ceph_open(struct inode *inode, struct file *file)
147 147
148 /* trivially open snapdir */ 148 /* trivially open snapdir */
149 if (ceph_snap(inode) == CEPH_SNAPDIR) { 149 if (ceph_snap(inode) == CEPH_SNAPDIR) {
150 spin_lock(&inode->i_lock); 150 spin_lock(&ci->i_ceph_lock);
151 __ceph_get_fmode(ci, fmode); 151 __ceph_get_fmode(ci, fmode);
152 spin_unlock(&inode->i_lock); 152 spin_unlock(&ci->i_ceph_lock);
153 return ceph_init_file(inode, file, fmode); 153 return ceph_init_file(inode, file, fmode);
154 } 154 }
155 155
@@ -158,7 +158,7 @@ int ceph_open(struct inode *inode, struct file *file)
158 * write) or any MDS (for read). Update wanted set 158 * write) or any MDS (for read). Update wanted set
159 * asynchronously. 159 * asynchronously.
160 */ 160 */
161 spin_lock(&inode->i_lock); 161 spin_lock(&ci->i_ceph_lock);
162 if (__ceph_is_any_real_caps(ci) && 162 if (__ceph_is_any_real_caps(ci) &&
163 (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) { 163 (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
164 int mds_wanted = __ceph_caps_mds_wanted(ci); 164 int mds_wanted = __ceph_caps_mds_wanted(ci);
@@ -168,7 +168,7 @@ int ceph_open(struct inode *inode, struct file *file)
168 inode, fmode, ceph_cap_string(wanted), 168 inode, fmode, ceph_cap_string(wanted),
169 ceph_cap_string(issued)); 169 ceph_cap_string(issued));
170 __ceph_get_fmode(ci, fmode); 170 __ceph_get_fmode(ci, fmode);
171 spin_unlock(&inode->i_lock); 171 spin_unlock(&ci->i_ceph_lock);
172 172
173 /* adjust wanted? */ 173 /* adjust wanted? */
174 if ((issued & wanted) != wanted && 174 if ((issued & wanted) != wanted &&
@@ -180,10 +180,10 @@ int ceph_open(struct inode *inode, struct file *file)
180 } else if (ceph_snap(inode) != CEPH_NOSNAP && 180 } else if (ceph_snap(inode) != CEPH_NOSNAP &&
181 (ci->i_snap_caps & wanted) == wanted) { 181 (ci->i_snap_caps & wanted) == wanted) {
182 __ceph_get_fmode(ci, fmode); 182 __ceph_get_fmode(ci, fmode);
183 spin_unlock(&inode->i_lock); 183 spin_unlock(&ci->i_ceph_lock);
184 return ceph_init_file(inode, file, fmode); 184 return ceph_init_file(inode, file, fmode);
185 } 185 }
186 spin_unlock(&inode->i_lock); 186 spin_unlock(&ci->i_ceph_lock);
187 187
188 dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted)); 188 dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
189 req = prepare_open_request(inode->i_sb, flags, 0); 189 req = prepare_open_request(inode->i_sb, flags, 0);
@@ -743,9 +743,9 @@ retry_snap:
743 */ 743 */
744 int dirty; 744 int dirty;
745 745
746 spin_lock(&inode->i_lock); 746 spin_lock(&ci->i_ceph_lock);
747 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); 747 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
748 spin_unlock(&inode->i_lock); 748 spin_unlock(&ci->i_ceph_lock);
749 ceph_put_cap_refs(ci, got); 749 ceph_put_cap_refs(ci, got);
750 750
751 ret = generic_file_aio_write(iocb, iov, nr_segs, pos); 751 ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
@@ -764,9 +764,9 @@ retry_snap:
764 764
765 if (ret >= 0) { 765 if (ret >= 0) {
766 int dirty; 766 int dirty;
767 spin_lock(&inode->i_lock); 767 spin_lock(&ci->i_ceph_lock);
768 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); 768 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
769 spin_unlock(&inode->i_lock); 769 spin_unlock(&ci->i_ceph_lock);
770 if (dirty) 770 if (dirty)
771 __mark_inode_dirty(inode, dirty); 771 __mark_inode_dirty(inode, dirty);
772 } 772 }
@@ -797,7 +797,8 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
797 797
798 mutex_lock(&inode->i_mutex); 798 mutex_lock(&inode->i_mutex);
799 __ceph_do_pending_vmtruncate(inode); 799 __ceph_do_pending_vmtruncate(inode);
800 if (origin != SEEK_CUR || origin != SEEK_SET) { 800
801 if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
801 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE); 802 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
802 if (ret < 0) { 803 if (ret < 0) {
803 offset = ret; 804 offset = ret;
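The ceph_llseek() hunk above fixes a predicate that was always true: no value of origin can equal both SEEK_CUR and SEEK_SET at once, so the old test origin != SEEK_CUR || origin != SEEK_SET passed for every origin and the getattr ran unconditionally. A minimal sketch of the corrected check, assuming only the SEEK_* constants from linux/fs.h (the helper name is hypothetical):

/* Sketch: refresh the size from the MDS only for origins whose result
 * depends on the current i_size (helper name is hypothetical). */
static int llseek_needs_size_update(int origin)
{
        return origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE;
}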
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e392bfce84a3..87fb132fb330 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -297,6 +297,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
297 297
298 dout("alloc_inode %p\n", &ci->vfs_inode); 298 dout("alloc_inode %p\n", &ci->vfs_inode);
299 299
300 spin_lock_init(&ci->i_ceph_lock);
301
300 ci->i_version = 0; 302 ci->i_version = 0;
301 ci->i_time_warp_seq = 0; 303 ci->i_time_warp_seq = 0;
302 ci->i_ceph_flags = 0; 304 ci->i_ceph_flags = 0;
@@ -583,7 +585,7 @@ static int fill_inode(struct inode *inode,
583 iinfo->xattr_len); 585 iinfo->xattr_len);
584 } 586 }
585 587
586 spin_lock(&inode->i_lock); 588 spin_lock(&ci->i_ceph_lock);
587 589
588 /* 590 /*
589 * provided version will be odd if inode value is projected, 591 * provided version will be odd if inode value is projected,
@@ -680,7 +682,7 @@ static int fill_inode(struct inode *inode,
680 char *sym; 682 char *sym;
681 683
682 BUG_ON(symlen != inode->i_size); 684 BUG_ON(symlen != inode->i_size);
683 spin_unlock(&inode->i_lock); 685 spin_unlock(&ci->i_ceph_lock);
684 686
685 err = -ENOMEM; 687 err = -ENOMEM;
686 sym = kmalloc(symlen+1, GFP_NOFS); 688 sym = kmalloc(symlen+1, GFP_NOFS);
@@ -689,7 +691,7 @@ static int fill_inode(struct inode *inode,
689 memcpy(sym, iinfo->symlink, symlen); 691 memcpy(sym, iinfo->symlink, symlen);
690 sym[symlen] = 0; 692 sym[symlen] = 0;
691 693
692 spin_lock(&inode->i_lock); 694 spin_lock(&ci->i_ceph_lock);
693 if (!ci->i_symlink) 695 if (!ci->i_symlink)
694 ci->i_symlink = sym; 696 ci->i_symlink = sym;
695 else 697 else
@@ -715,7 +717,7 @@ static int fill_inode(struct inode *inode,
715 } 717 }
716 718
717no_change: 719no_change:
718 spin_unlock(&inode->i_lock); 720 spin_unlock(&ci->i_ceph_lock);
719 721
720 /* queue truncate if we saw i_size decrease */ 722 /* queue truncate if we saw i_size decrease */
721 if (queue_trunc) 723 if (queue_trunc)
@@ -750,13 +752,13 @@ no_change:
750 info->cap.flags, 752 info->cap.flags,
751 caps_reservation); 753 caps_reservation);
752 } else { 754 } else {
753 spin_lock(&inode->i_lock); 755 spin_lock(&ci->i_ceph_lock);
754 dout(" %p got snap_caps %s\n", inode, 756 dout(" %p got snap_caps %s\n", inode,
755 ceph_cap_string(le32_to_cpu(info->cap.caps))); 757 ceph_cap_string(le32_to_cpu(info->cap.caps)));
756 ci->i_snap_caps |= le32_to_cpu(info->cap.caps); 758 ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
757 if (cap_fmode >= 0) 759 if (cap_fmode >= 0)
758 __ceph_get_fmode(ci, cap_fmode); 760 __ceph_get_fmode(ci, cap_fmode);
759 spin_unlock(&inode->i_lock); 761 spin_unlock(&ci->i_ceph_lock);
760 } 762 }
761 } else if (cap_fmode >= 0) { 763 } else if (cap_fmode >= 0) {
762 pr_warning("mds issued no caps on %llx.%llx\n", 764 pr_warning("mds issued no caps on %llx.%llx\n",
@@ -849,19 +851,20 @@ static void ceph_set_dentry_offset(struct dentry *dn)
849{ 851{
850 struct dentry *dir = dn->d_parent; 852 struct dentry *dir = dn->d_parent;
851 struct inode *inode = dir->d_inode; 853 struct inode *inode = dir->d_inode;
854 struct ceph_inode_info *ci = ceph_inode(inode);
852 struct ceph_dentry_info *di; 855 struct ceph_dentry_info *di;
853 856
854 BUG_ON(!inode); 857 BUG_ON(!inode);
855 858
856 di = ceph_dentry(dn); 859 di = ceph_dentry(dn);
857 860
858 spin_lock(&inode->i_lock); 861 spin_lock(&ci->i_ceph_lock);
859 if (!ceph_dir_test_complete(inode)) { 862 if (!ceph_dir_test_complete(inode)) {
860 spin_unlock(&inode->i_lock); 863 spin_unlock(&ci->i_ceph_lock);
861 return; 864 return;
862 } 865 }
863 di->offset = ceph_inode(inode)->i_max_offset++; 866 di->offset = ceph_inode(inode)->i_max_offset++;
864 spin_unlock(&inode->i_lock); 867 spin_unlock(&ci->i_ceph_lock);
865 868
866 spin_lock(&dir->d_lock); 869 spin_lock(&dir->d_lock);
867 spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED); 870 spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
@@ -1308,7 +1311,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
1308 struct ceph_inode_info *ci = ceph_inode(inode); 1311 struct ceph_inode_info *ci = ceph_inode(inode);
1309 int ret = 0; 1312 int ret = 0;
1310 1313
1311 spin_lock(&inode->i_lock); 1314 spin_lock(&ci->i_ceph_lock);
1312 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size); 1315 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1313 inode->i_size = size; 1316 inode->i_size = size;
1314 inode->i_blocks = (size + (1 << 9) - 1) >> 9; 1317 inode->i_blocks = (size + (1 << 9) - 1) >> 9;
@@ -1318,7 +1321,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
1318 (ci->i_reported_size << 1) < ci->i_max_size) 1321 (ci->i_reported_size << 1) < ci->i_max_size)
1319 ret = 1; 1322 ret = 1;
1320 1323
1321 spin_unlock(&inode->i_lock); 1324 spin_unlock(&ci->i_ceph_lock);
1322 return ret; 1325 return ret;
1323} 1326}
1324 1327
@@ -1328,12 +1331,13 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
1328 */ 1331 */
1329void ceph_queue_writeback(struct inode *inode) 1332void ceph_queue_writeback(struct inode *inode)
1330{ 1333{
1334 ihold(inode);
1331 if (queue_work(ceph_inode_to_client(inode)->wb_wq, 1335 if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1332 &ceph_inode(inode)->i_wb_work)) { 1336 &ceph_inode(inode)->i_wb_work)) {
1333 dout("ceph_queue_writeback %p\n", inode); 1337 dout("ceph_queue_writeback %p\n", inode);
1334 ihold(inode);
1335 } else { 1338 } else {
1336 dout("ceph_queue_writeback %p failed\n", inode); 1339 dout("ceph_queue_writeback %p failed\n", inode);
1340 iput(inode);
1337 } 1341 }
1338} 1342}
1339 1343
@@ -1353,12 +1357,13 @@ static void ceph_writeback_work(struct work_struct *work)
1353 */ 1357 */
1354void ceph_queue_invalidate(struct inode *inode) 1358void ceph_queue_invalidate(struct inode *inode)
1355{ 1359{
1360 ihold(inode);
1356 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, 1361 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1357 &ceph_inode(inode)->i_pg_inv_work)) { 1362 &ceph_inode(inode)->i_pg_inv_work)) {
1358 dout("ceph_queue_invalidate %p\n", inode); 1363 dout("ceph_queue_invalidate %p\n", inode);
1359 ihold(inode);
1360 } else { 1364 } else {
1361 dout("ceph_queue_invalidate %p failed\n", inode); 1365 dout("ceph_queue_invalidate %p failed\n", inode);
1366 iput(inode);
1362 } 1367 }
1363} 1368}
1364 1369
@@ -1374,20 +1379,20 @@ static void ceph_invalidate_work(struct work_struct *work)
1374 u32 orig_gen; 1379 u32 orig_gen;
1375 int check = 0; 1380 int check = 0;
1376 1381
1377 spin_lock(&inode->i_lock); 1382 spin_lock(&ci->i_ceph_lock);
1378 dout("invalidate_pages %p gen %d revoking %d\n", inode, 1383 dout("invalidate_pages %p gen %d revoking %d\n", inode,
1379 ci->i_rdcache_gen, ci->i_rdcache_revoking); 1384 ci->i_rdcache_gen, ci->i_rdcache_revoking);
1380 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { 1385 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1381 /* nevermind! */ 1386 /* nevermind! */
1382 spin_unlock(&inode->i_lock); 1387 spin_unlock(&ci->i_ceph_lock);
1383 goto out; 1388 goto out;
1384 } 1389 }
1385 orig_gen = ci->i_rdcache_gen; 1390 orig_gen = ci->i_rdcache_gen;
1386 spin_unlock(&inode->i_lock); 1391 spin_unlock(&ci->i_ceph_lock);
1387 1392
1388 truncate_inode_pages(&inode->i_data, 0); 1393 truncate_inode_pages(&inode->i_data, 0);
1389 1394
1390 spin_lock(&inode->i_lock); 1395 spin_lock(&ci->i_ceph_lock);
1391 if (orig_gen == ci->i_rdcache_gen && 1396 if (orig_gen == ci->i_rdcache_gen &&
1392 orig_gen == ci->i_rdcache_revoking) { 1397 orig_gen == ci->i_rdcache_revoking) {
1393 dout("invalidate_pages %p gen %d successful\n", inode, 1398 dout("invalidate_pages %p gen %d successful\n", inode,
@@ -1399,7 +1404,7 @@ static void ceph_invalidate_work(struct work_struct *work)
1399 inode, orig_gen, ci->i_rdcache_gen, 1404 inode, orig_gen, ci->i_rdcache_gen,
1400 ci->i_rdcache_revoking); 1405 ci->i_rdcache_revoking);
1401 } 1406 }
1402 spin_unlock(&inode->i_lock); 1407 spin_unlock(&ci->i_ceph_lock);
1403 1408
1404 if (check) 1409 if (check)
1405 ceph_check_caps(ci, 0, NULL); 1410 ceph_check_caps(ci, 0, NULL);
@@ -1434,13 +1439,14 @@ void ceph_queue_vmtruncate(struct inode *inode)
1434{ 1439{
1435 struct ceph_inode_info *ci = ceph_inode(inode); 1440 struct ceph_inode_info *ci = ceph_inode(inode);
1436 1441
1442 ihold(inode);
1437 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, 1443 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
1438 &ci->i_vmtruncate_work)) { 1444 &ci->i_vmtruncate_work)) {
1439 dout("ceph_queue_vmtruncate %p\n", inode); 1445 dout("ceph_queue_vmtruncate %p\n", inode);
1440 ihold(inode);
1441 } else { 1446 } else {
1442 dout("ceph_queue_vmtruncate %p failed, pending=%d\n", 1447 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1443 inode, ci->i_truncate_pending); 1448 inode, ci->i_truncate_pending);
1449 iput(inode);
1444 } 1450 }
1445} 1451}
1446 1452
@@ -1457,10 +1463,10 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
1457 int wrbuffer_refs, wake = 0; 1463 int wrbuffer_refs, wake = 0;
1458 1464
1459retry: 1465retry:
1460 spin_lock(&inode->i_lock); 1466 spin_lock(&ci->i_ceph_lock);
1461 if (ci->i_truncate_pending == 0) { 1467 if (ci->i_truncate_pending == 0) {
1462 dout("__do_pending_vmtruncate %p none pending\n", inode); 1468 dout("__do_pending_vmtruncate %p none pending\n", inode);
1463 spin_unlock(&inode->i_lock); 1469 spin_unlock(&ci->i_ceph_lock);
1464 return; 1470 return;
1465 } 1471 }
1466 1472
@@ -1471,7 +1477,7 @@ retry:
1471 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) { 1477 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1472 dout("__do_pending_vmtruncate %p flushing snaps first\n", 1478 dout("__do_pending_vmtruncate %p flushing snaps first\n",
1473 inode); 1479 inode);
1474 spin_unlock(&inode->i_lock); 1480 spin_unlock(&ci->i_ceph_lock);
1475 filemap_write_and_wait_range(&inode->i_data, 0, 1481 filemap_write_and_wait_range(&inode->i_data, 0,
1476 inode->i_sb->s_maxbytes); 1482 inode->i_sb->s_maxbytes);
1477 goto retry; 1483 goto retry;
@@ -1481,15 +1487,15 @@ retry:
1481 wrbuffer_refs = ci->i_wrbuffer_ref; 1487 wrbuffer_refs = ci->i_wrbuffer_ref;
1482 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode, 1488 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1483 ci->i_truncate_pending, to); 1489 ci->i_truncate_pending, to);
1484 spin_unlock(&inode->i_lock); 1490 spin_unlock(&ci->i_ceph_lock);
1485 1491
1486 truncate_inode_pages(inode->i_mapping, to); 1492 truncate_inode_pages(inode->i_mapping, to);
1487 1493
1488 spin_lock(&inode->i_lock); 1494 spin_lock(&ci->i_ceph_lock);
1489 ci->i_truncate_pending--; 1495 ci->i_truncate_pending--;
1490 if (ci->i_truncate_pending == 0) 1496 if (ci->i_truncate_pending == 0)
1491 wake = 1; 1497 wake = 1;
1492 spin_unlock(&inode->i_lock); 1498 spin_unlock(&ci->i_ceph_lock);
1493 1499
1494 if (wrbuffer_refs == 0) 1500 if (wrbuffer_refs == 0)
1495 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 1501 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -1544,7 +1550,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1544 if (IS_ERR(req)) 1550 if (IS_ERR(req))
1545 return PTR_ERR(req); 1551 return PTR_ERR(req);
1546 1552
1547 spin_lock(&inode->i_lock); 1553 spin_lock(&ci->i_ceph_lock);
1548 issued = __ceph_caps_issued(ci, NULL); 1554 issued = __ceph_caps_issued(ci, NULL);
1549 dout("setattr %p issued %s\n", inode, ceph_cap_string(issued)); 1555 dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
1550 1556
@@ -1692,7 +1698,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1692 } 1698 }
1693 1699
1694 release &= issued; 1700 release &= issued;
1695 spin_unlock(&inode->i_lock); 1701 spin_unlock(&ci->i_ceph_lock);
1696 1702
1697 if (inode_dirty_flags) 1703 if (inode_dirty_flags)
1698 __mark_inode_dirty(inode, inode_dirty_flags); 1704 __mark_inode_dirty(inode, inode_dirty_flags);
@@ -1714,7 +1720,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1714 __ceph_do_pending_vmtruncate(inode); 1720 __ceph_do_pending_vmtruncate(inode);
1715 return err; 1721 return err;
1716out: 1722out:
1717 spin_unlock(&inode->i_lock); 1723 spin_unlock(&ci->i_ceph_lock);
1718 ceph_mdsc_put_request(req); 1724 ceph_mdsc_put_request(req);
1719 return err; 1725 return err;
1720} 1726}
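The ceph_queue_writeback, ceph_queue_invalidate and ceph_queue_vmtruncate hunks above all make the same change: take the inode reference with ihold() before calling queue_work(), and drop it with iput() on the branch where the work item was already queued. That way queued work always owns a reference and nothing is leaked when queue_work() returns false. A minimal sketch of the pattern, assuming linux/fs.h and linux/workqueue.h; the wrapper name is hypothetical:

/* Sketch of the queue-and-pin pattern used above (wrapper is hypothetical). */
static void queue_inode_work(struct workqueue_struct *wq,
                             struct inode *inode, struct work_struct *work)
{
        ihold(inode);                   /* pin the inode before queueing */
        if (!queue_work(wq, work))
                iput(inode);            /* work already queued: drop our ref */
}

The worker function is then expected to call iput() when it finishes, matching the ihold() taken here, as the ceph work functions do.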
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 5a14c29cbba6..790914a598dd 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -241,11 +241,11 @@ static long ceph_ioctl_lazyio(struct file *file)
241 struct ceph_inode_info *ci = ceph_inode(inode); 241 struct ceph_inode_info *ci = ceph_inode(inode);
242 242
243 if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) { 243 if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
244 spin_lock(&inode->i_lock); 244 spin_lock(&ci->i_ceph_lock);
245 ci->i_nr_by_mode[fi->fmode]--; 245 ci->i_nr_by_mode[fi->fmode]--;
246 fi->fmode |= CEPH_FILE_MODE_LAZY; 246 fi->fmode |= CEPH_FILE_MODE_LAZY;
247 ci->i_nr_by_mode[fi->fmode]++; 247 ci->i_nr_by_mode[fi->fmode]++;
248 spin_unlock(&inode->i_lock); 248 spin_unlock(&ci->i_ceph_lock);
249 dout("ioctl_layzio: file %p marked lazy\n", file); 249 dout("ioctl_layzio: file %p marked lazy\n", file);
250 250
251 ceph_check_caps(ci, 0, NULL); 251 ceph_check_caps(ci, 0, NULL);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 264ab701154f..6203d805eb45 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -732,21 +732,21 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
732 } 732 }
733 } 733 }
734 734
735 spin_lock(&inode->i_lock); 735 spin_lock(&ci->i_ceph_lock);
736 cap = NULL; 736 cap = NULL;
737 if (mode == USE_AUTH_MDS) 737 if (mode == USE_AUTH_MDS)
738 cap = ci->i_auth_cap; 738 cap = ci->i_auth_cap;
739 if (!cap && !RB_EMPTY_ROOT(&ci->i_caps)) 739 if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
740 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); 740 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
741 if (!cap) { 741 if (!cap) {
742 spin_unlock(&inode->i_lock); 742 spin_unlock(&ci->i_ceph_lock);
743 goto random; 743 goto random;
744 } 744 }
745 mds = cap->session->s_mds; 745 mds = cap->session->s_mds;
746 dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n", 746 dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
747 inode, ceph_vinop(inode), mds, 747 inode, ceph_vinop(inode), mds,
748 cap == ci->i_auth_cap ? "auth " : "", cap); 748 cap == ci->i_auth_cap ? "auth " : "", cap);
749 spin_unlock(&inode->i_lock); 749 spin_unlock(&ci->i_ceph_lock);
750 return mds; 750 return mds;
751 751
752random: 752random:
@@ -951,7 +951,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
951 951
952 dout("removing cap %p, ci is %p, inode is %p\n", 952 dout("removing cap %p, ci is %p, inode is %p\n",
953 cap, ci, &ci->vfs_inode); 953 cap, ci, &ci->vfs_inode);
954 spin_lock(&inode->i_lock); 954 spin_lock(&ci->i_ceph_lock);
955 __ceph_remove_cap(cap); 955 __ceph_remove_cap(cap);
956 if (!__ceph_is_any_real_caps(ci)) { 956 if (!__ceph_is_any_real_caps(ci)) {
957 struct ceph_mds_client *mdsc = 957 struct ceph_mds_client *mdsc =
@@ -984,7 +984,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
984 } 984 }
985 spin_unlock(&mdsc->cap_dirty_lock); 985 spin_unlock(&mdsc->cap_dirty_lock);
986 } 986 }
987 spin_unlock(&inode->i_lock); 987 spin_unlock(&ci->i_ceph_lock);
988 while (drop--) 988 while (drop--)
989 iput(inode); 989 iput(inode);
990 return 0; 990 return 0;
@@ -1015,10 +1015,10 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1015 1015
1016 wake_up_all(&ci->i_cap_wq); 1016 wake_up_all(&ci->i_cap_wq);
1017 if (arg) { 1017 if (arg) {
1018 spin_lock(&inode->i_lock); 1018 spin_lock(&ci->i_ceph_lock);
1019 ci->i_wanted_max_size = 0; 1019 ci->i_wanted_max_size = 0;
1020 ci->i_requested_max_size = 0; 1020 ci->i_requested_max_size = 0;
1021 spin_unlock(&inode->i_lock); 1021 spin_unlock(&ci->i_ceph_lock);
1022 } 1022 }
1023 return 0; 1023 return 0;
1024} 1024}
@@ -1151,7 +1151,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1151 if (session->s_trim_caps <= 0) 1151 if (session->s_trim_caps <= 0)
1152 return -1; 1152 return -1;
1153 1153
1154 spin_lock(&inode->i_lock); 1154 spin_lock(&ci->i_ceph_lock);
1155 mine = cap->issued | cap->implemented; 1155 mine = cap->issued | cap->implemented;
1156 used = __ceph_caps_used(ci); 1156 used = __ceph_caps_used(ci);
1157 oissued = __ceph_caps_issued_other(ci, cap); 1157 oissued = __ceph_caps_issued_other(ci, cap);
@@ -1170,7 +1170,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1170 __ceph_remove_cap(cap); 1170 __ceph_remove_cap(cap);
1171 } else { 1171 } else {
1172 /* try to drop referring dentries */ 1172 /* try to drop referring dentries */
1173 spin_unlock(&inode->i_lock); 1173 spin_unlock(&ci->i_ceph_lock);
1174 d_prune_aliases(inode); 1174 d_prune_aliases(inode);
1175 dout("trim_caps_cb %p cap %p pruned, count now %d\n", 1175 dout("trim_caps_cb %p cap %p pruned, count now %d\n",
1176 inode, cap, atomic_read(&inode->i_count)); 1176 inode, cap, atomic_read(&inode->i_count));
@@ -1178,7 +1178,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1178 } 1178 }
1179 1179
1180out: 1180out:
1181 spin_unlock(&inode->i_lock); 1181 spin_unlock(&ci->i_ceph_lock);
1182 return 0; 1182 return 0;
1183} 1183}
1184 1184
@@ -1296,7 +1296,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
1296 i_flushing_item); 1296 i_flushing_item);
1297 struct inode *inode = &ci->vfs_inode; 1297 struct inode *inode = &ci->vfs_inode;
1298 1298
1299 spin_lock(&inode->i_lock); 1299 spin_lock(&ci->i_ceph_lock);
1300 if (ci->i_cap_flush_seq <= want_flush_seq) { 1300 if (ci->i_cap_flush_seq <= want_flush_seq) {
1301 dout("check_cap_flush still flushing %p " 1301 dout("check_cap_flush still flushing %p "
1302 "seq %lld <= %lld to mds%d\n", inode, 1302 "seq %lld <= %lld to mds%d\n", inode,
@@ -1304,7 +1304,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
1304 session->s_mds); 1304 session->s_mds);
1305 ret = 0; 1305 ret = 0;
1306 } 1306 }
1307 spin_unlock(&inode->i_lock); 1307 spin_unlock(&ci->i_ceph_lock);
1308 } 1308 }
1309 mutex_unlock(&session->s_mutex); 1309 mutex_unlock(&session->s_mutex);
1310 ceph_put_mds_session(session); 1310 ceph_put_mds_session(session);
@@ -1495,6 +1495,7 @@ retry:
1495 pos, temp); 1495 pos, temp);
1496 } else if (stop_on_nosnap && inode && 1496 } else if (stop_on_nosnap && inode &&
1497 ceph_snap(inode) == CEPH_NOSNAP) { 1497 ceph_snap(inode) == CEPH_NOSNAP) {
1498 spin_unlock(&temp->d_lock);
1498 break; 1499 break;
1499 } else { 1500 } else {
1500 pos -= temp->d_name.len; 1501 pos -= temp->d_name.len;
@@ -2011,10 +2012,10 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
2011 struct ceph_inode_info *ci = ceph_inode(inode); 2012 struct ceph_inode_info *ci = ceph_inode(inode);
2012 2013
2013 dout("invalidate_dir_request %p (D_COMPLETE, lease(s))\n", inode); 2014 dout("invalidate_dir_request %p (D_COMPLETE, lease(s))\n", inode);
2014 spin_lock(&inode->i_lock); 2015 spin_lock(&ci->i_ceph_lock);
2015 ceph_dir_clear_complete(inode); 2016 ceph_dir_clear_complete(inode);
2016 ci->i_release_count++; 2017 ci->i_release_count++;
2017 spin_unlock(&inode->i_lock); 2018 spin_unlock(&ci->i_ceph_lock);
2018 2019
2019 if (req->r_dentry) 2020 if (req->r_dentry)
2020 ceph_invalidate_dentry_lease(req->r_dentry); 2021 ceph_invalidate_dentry_lease(req->r_dentry);
@@ -2422,7 +2423,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2422 if (err) 2423 if (err)
2423 goto out_free; 2424 goto out_free;
2424 2425
2425 spin_lock(&inode->i_lock); 2426 spin_lock(&ci->i_ceph_lock);
2426 cap->seq = 0; /* reset cap seq */ 2427 cap->seq = 0; /* reset cap seq */
2427 cap->issue_seq = 0; /* and issue_seq */ 2428 cap->issue_seq = 0; /* and issue_seq */
2428 2429
@@ -2445,7 +2446,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2445 rec.v1.pathbase = cpu_to_le64(pathbase); 2446 rec.v1.pathbase = cpu_to_le64(pathbase);
2446 reclen = sizeof(rec.v1); 2447 reclen = sizeof(rec.v1);
2447 } 2448 }
2448 spin_unlock(&inode->i_lock); 2449 spin_unlock(&ci->i_ceph_lock);
2449 2450
2450 if (recon_state->flock) { 2451 if (recon_state->flock) {
2451 int num_fcntl_locks, num_flock_locks; 2452 int num_fcntl_locks, num_flock_locks;
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 4bb239921dbd..a50ca0e39475 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -20,7 +20,7 @@
20 * 20 *
21 * mdsc->snap_rwsem 21 * mdsc->snap_rwsem
22 * 22 *
23 * inode->i_lock 23 * ci->i_ceph_lock
24 * mdsc->snap_flush_lock 24 * mdsc->snap_flush_lock
25 * mdsc->cap_delay_lock 25 * mdsc->cap_delay_lock
26 * 26 *
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index e26437191333..a559c80f127a 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -446,7 +446,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
446 return; 446 return;
447 } 447 }
448 448
449 spin_lock(&inode->i_lock); 449 spin_lock(&ci->i_ceph_lock);
450 used = __ceph_caps_used(ci); 450 used = __ceph_caps_used(ci);
451 dirty = __ceph_caps_dirty(ci); 451 dirty = __ceph_caps_dirty(ci);
452 452
@@ -528,7 +528,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
528 kfree(capsnap); 528 kfree(capsnap);
529 } 529 }
530 530
531 spin_unlock(&inode->i_lock); 531 spin_unlock(&ci->i_ceph_lock);
532} 532}
533 533
534/* 534/*
@@ -537,7 +537,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
537 * 537 *
538 * If capsnap can now be flushed, add to snap_flush list, and return 1. 538 * If capsnap can now be flushed, add to snap_flush list, and return 1.
539 * 539 *
540 * Caller must hold i_lock. 540 * Caller must hold i_ceph_lock.
541 */ 541 */
542int __ceph_finish_cap_snap(struct ceph_inode_info *ci, 542int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
543 struct ceph_cap_snap *capsnap) 543 struct ceph_cap_snap *capsnap)
@@ -739,9 +739,9 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
739 inode = &ci->vfs_inode; 739 inode = &ci->vfs_inode;
740 ihold(inode); 740 ihold(inode);
741 spin_unlock(&mdsc->snap_flush_lock); 741 spin_unlock(&mdsc->snap_flush_lock);
742 spin_lock(&inode->i_lock); 742 spin_lock(&ci->i_ceph_lock);
743 __ceph_flush_snaps(ci, &session, 0); 743 __ceph_flush_snaps(ci, &session, 0);
744 spin_unlock(&inode->i_lock); 744 spin_unlock(&ci->i_ceph_lock);
745 iput(inode); 745 iput(inode);
746 spin_lock(&mdsc->snap_flush_lock); 746 spin_lock(&mdsc->snap_flush_lock);
747 } 747 }
@@ -847,7 +847,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
847 continue; 847 continue;
848 ci = ceph_inode(inode); 848 ci = ceph_inode(inode);
849 849
850 spin_lock(&inode->i_lock); 850 spin_lock(&ci->i_ceph_lock);
851 if (!ci->i_snap_realm) 851 if (!ci->i_snap_realm)
852 goto skip_inode; 852 goto skip_inode;
853 /* 853 /*
@@ -876,7 +876,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
876 oldrealm = ci->i_snap_realm; 876 oldrealm = ci->i_snap_realm;
877 ci->i_snap_realm = realm; 877 ci->i_snap_realm = realm;
878 spin_unlock(&realm->inodes_with_caps_lock); 878 spin_unlock(&realm->inodes_with_caps_lock);
879 spin_unlock(&inode->i_lock); 879 spin_unlock(&ci->i_ceph_lock);
880 880
881 ceph_get_snap_realm(mdsc, realm); 881 ceph_get_snap_realm(mdsc, realm);
882 ceph_put_snap_realm(mdsc, oldrealm); 882 ceph_put_snap_realm(mdsc, oldrealm);
@@ -885,7 +885,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
885 continue; 885 continue;
886 886
887skip_inode: 887skip_inode:
888 spin_unlock(&inode->i_lock); 888 spin_unlock(&ci->i_ceph_lock);
889 iput(inode); 889 iput(inode);
890 } 890 }
891 891
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index a90846fac759..b48f15f101a0 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -383,7 +383,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
383 if (fsopt->rsize != CEPH_RSIZE_DEFAULT) 383 if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
384 seq_printf(m, ",rsize=%d", fsopt->rsize); 384 seq_printf(m, ",rsize=%d", fsopt->rsize);
385 if (fsopt->rasize != CEPH_RASIZE_DEFAULT) 385 if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
386 seq_printf(m, ",rasize=%d", fsopt->rsize); 386 seq_printf(m, ",rasize=%d", fsopt->rasize);
387 if (fsopt->congestion_kb != default_congestion_kb()) 387 if (fsopt->congestion_kb != default_congestion_kb())
388 seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb); 388 seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
389 if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT) 389 if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
@@ -638,10 +638,12 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
638 if (err == 0) { 638 if (err == 0) {
639 dout("open_root_inode success\n"); 639 dout("open_root_inode success\n");
640 if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT && 640 if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
641 fsc->sb->s_root == NULL) 641 fsc->sb->s_root == NULL) {
642 root = d_alloc_root(req->r_target_inode); 642 root = d_alloc_root(req->r_target_inode);
643 else 643 ceph_init_dentry(root);
644 } else {
644 root = d_obtain_alias(req->r_target_inode); 645 root = d_obtain_alias(req->r_target_inode);
646 }
645 req->r_target_inode = NULL; 647 req->r_target_inode = NULL;
646 dout("open_root_inode success, root dentry is %p\n", root); 648 dout("open_root_inode success, root dentry is %p\n", root);
647 } else { 649 } else {
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 01bf189e08a9..edcbf3774a56 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -220,7 +220,7 @@ struct ceph_dentry_info {
220 * The locking for D_COMPLETE is a bit odd: 220 * The locking for D_COMPLETE is a bit odd:
221 * - we can clear it at almost any time (see ceph_d_prune) 221 * - we can clear it at almost any time (see ceph_d_prune)
222 * - it is only meaningful if: 222 * - it is only meaningful if:
223 * - we hold dir inode i_lock 223 * - we hold dir inode i_ceph_lock
224 * - we hold dir FILE_SHARED caps 224 * - we hold dir FILE_SHARED caps
225 * - the dentry D_COMPLETE is set 225 * - the dentry D_COMPLETE is set
226 */ 226 */
@@ -250,6 +250,8 @@ struct ceph_inode_xattrs_info {
250struct ceph_inode_info { 250struct ceph_inode_info {
251 struct ceph_vino i_vino; /* ceph ino + snap */ 251 struct ceph_vino i_vino; /* ceph ino + snap */
252 252
253 spinlock_t i_ceph_lock;
254
253 u64 i_version; 255 u64 i_version;
254 u32 i_time_warp_seq; 256 u32 i_time_warp_seq;
255 257
@@ -271,7 +273,7 @@ struct ceph_inode_info {
271 273
272 struct ceph_inode_xattrs_info i_xattrs; 274 struct ceph_inode_xattrs_info i_xattrs;
273 275
274 /* capabilities. protected _both_ by i_lock and cap->session's 276 /* capabilities. protected _both_ by i_ceph_lock and cap->session's
275 * s_mutex. */ 277 * s_mutex. */
276 struct rb_root i_caps; /* cap list */ 278 struct rb_root i_caps; /* cap list */
277 struct ceph_cap *i_auth_cap; /* authoritative cap, if any */ 279 struct ceph_cap *i_auth_cap; /* authoritative cap, if any */
@@ -437,18 +439,18 @@ static inline void ceph_i_clear(struct inode *inode, unsigned mask)
437{ 439{
438 struct ceph_inode_info *ci = ceph_inode(inode); 440 struct ceph_inode_info *ci = ceph_inode(inode);
439 441
440 spin_lock(&inode->i_lock); 442 spin_lock(&ci->i_ceph_lock);
441 ci->i_ceph_flags &= ~mask; 443 ci->i_ceph_flags &= ~mask;
442 spin_unlock(&inode->i_lock); 444 spin_unlock(&ci->i_ceph_lock);
443} 445}
444 446
445static inline void ceph_i_set(struct inode *inode, unsigned mask) 447static inline void ceph_i_set(struct inode *inode, unsigned mask)
446{ 448{
447 struct ceph_inode_info *ci = ceph_inode(inode); 449 struct ceph_inode_info *ci = ceph_inode(inode);
448 450
449 spin_lock(&inode->i_lock); 451 spin_lock(&ci->i_ceph_lock);
450 ci->i_ceph_flags |= mask; 452 ci->i_ceph_flags |= mask;
451 spin_unlock(&inode->i_lock); 453 spin_unlock(&ci->i_ceph_lock);
452} 454}
453 455
454static inline bool ceph_i_test(struct inode *inode, unsigned mask) 456static inline bool ceph_i_test(struct inode *inode, unsigned mask)
@@ -456,9 +458,9 @@ static inline bool ceph_i_test(struct inode *inode, unsigned mask)
456 struct ceph_inode_info *ci = ceph_inode(inode); 458 struct ceph_inode_info *ci = ceph_inode(inode);
457 bool r; 459 bool r;
458 460
459 spin_lock(&inode->i_lock); 461 spin_lock(&ci->i_ceph_lock);
460 r = (ci->i_ceph_flags & mask) == mask; 462 r = (ci->i_ceph_flags & mask) == mask;
461 spin_unlock(&inode->i_lock); 463 spin_unlock(&ci->i_ceph_lock);
462 return r; 464 return r;
463} 465}
464 466
@@ -508,9 +510,9 @@ extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
508static inline int ceph_caps_issued(struct ceph_inode_info *ci) 510static inline int ceph_caps_issued(struct ceph_inode_info *ci)
509{ 511{
510 int issued; 512 int issued;
511 spin_lock(&ci->vfs_inode.i_lock); 513 spin_lock(&ci->i_ceph_lock);
512 issued = __ceph_caps_issued(ci, NULL); 514 issued = __ceph_caps_issued(ci, NULL);
513 spin_unlock(&ci->vfs_inode.i_lock); 515 spin_unlock(&ci->i_ceph_lock);
514 return issued; 516 return issued;
515} 517}
516 518
@@ -518,9 +520,9 @@ static inline int ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask,
518 int touch) 520 int touch)
519{ 521{
520 int r; 522 int r;
521 spin_lock(&ci->vfs_inode.i_lock); 523 spin_lock(&ci->i_ceph_lock);
522 r = __ceph_caps_issued_mask(ci, mask, touch); 524 r = __ceph_caps_issued_mask(ci, mask, touch);
523 spin_unlock(&ci->vfs_inode.i_lock); 525 spin_unlock(&ci->i_ceph_lock);
524 return r; 526 return r;
525} 527}
526 528
@@ -743,10 +745,9 @@ extern int ceph_add_cap(struct inode *inode,
743extern void __ceph_remove_cap(struct ceph_cap *cap); 745extern void __ceph_remove_cap(struct ceph_cap *cap);
744static inline void ceph_remove_cap(struct ceph_cap *cap) 746static inline void ceph_remove_cap(struct ceph_cap *cap)
745{ 747{
746 struct inode *inode = &cap->ci->vfs_inode; 748 spin_lock(&cap->ci->i_ceph_lock);
747 spin_lock(&inode->i_lock);
748 __ceph_remove_cap(cap); 749 __ceph_remove_cap(cap);
749 spin_unlock(&inode->i_lock); 750 spin_unlock(&cap->ci->i_ceph_lock);
750} 751}
751extern void ceph_put_cap(struct ceph_mds_client *mdsc, 752extern void ceph_put_cap(struct ceph_mds_client *mdsc,
752 struct ceph_cap *cap); 753 struct ceph_cap *cap);
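The super.h hunk above is the heart of this run of ceph changes: a dedicated spinlock, ci->i_ceph_lock, is added to struct ceph_inode_info (and initialised in ceph_alloc_inode() in the fs/ceph/inode.c hunk), and the per-inode cap/flag state previously serialised by the VFS inode->i_lock is now guarded by it instead. A minimal sketch of the resulting access pattern, mirroring the ceph_i_test() helper above; the function name here is hypothetical:

/* Sketch: ceph-private inode state is guarded by ci->i_ceph_lock,
 * not by the generic inode->i_lock (hypothetical helper). */
static inline bool ceph_flags_all_set(struct ceph_inode_info *ci, unsigned mask)
{
        bool r;

        spin_lock(&ci->i_ceph_lock);
        r = (ci->i_ceph_flags & mask) == mask;
        spin_unlock(&ci->i_ceph_lock);
        return r;
}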
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 96c6739a0280..a5e36e4488a7 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -343,8 +343,8 @@ void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
343} 343}
344 344
345static int __build_xattrs(struct inode *inode) 345static int __build_xattrs(struct inode *inode)
346 __releases(inode->i_lock) 346 __releases(ci->i_ceph_lock)
347 __acquires(inode->i_lock) 347 __acquires(ci->i_ceph_lock)
348{ 348{
349 u32 namelen; 349 u32 namelen;
350 u32 numattr = 0; 350 u32 numattr = 0;
@@ -372,7 +372,7 @@ start:
372 end = p + ci->i_xattrs.blob->vec.iov_len; 372 end = p + ci->i_xattrs.blob->vec.iov_len;
373 ceph_decode_32_safe(&p, end, numattr, bad); 373 ceph_decode_32_safe(&p, end, numattr, bad);
374 xattr_version = ci->i_xattrs.version; 374 xattr_version = ci->i_xattrs.version;
375 spin_unlock(&inode->i_lock); 375 spin_unlock(&ci->i_ceph_lock);
376 376
377 xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *), 377 xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
378 GFP_NOFS); 378 GFP_NOFS);
@@ -387,7 +387,7 @@ start:
387 goto bad_lock; 387 goto bad_lock;
388 } 388 }
389 389
390 spin_lock(&inode->i_lock); 390 spin_lock(&ci->i_ceph_lock);
391 if (ci->i_xattrs.version != xattr_version) { 391 if (ci->i_xattrs.version != xattr_version) {
392 /* lost a race, retry */ 392 /* lost a race, retry */
393 for (i = 0; i < numattr; i++) 393 for (i = 0; i < numattr; i++)
@@ -418,7 +418,7 @@ start:
418 418
419 return err; 419 return err;
420bad_lock: 420bad_lock:
421 spin_lock(&inode->i_lock); 421 spin_lock(&ci->i_ceph_lock);
422bad: 422bad:
423 if (xattrs) { 423 if (xattrs) {
424 for (i = 0; i < numattr; i++) 424 for (i = 0; i < numattr; i++)
@@ -512,7 +512,7 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
512 if (vxattrs) 512 if (vxattrs)
513 vxattr = ceph_match_vxattr(vxattrs, name); 513 vxattr = ceph_match_vxattr(vxattrs, name);
514 514
515 spin_lock(&inode->i_lock); 515 spin_lock(&ci->i_ceph_lock);
516 dout("getxattr %p ver=%lld index_ver=%lld\n", inode, 516 dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
517 ci->i_xattrs.version, ci->i_xattrs.index_version); 517 ci->i_xattrs.version, ci->i_xattrs.index_version);
518 518
@@ -520,14 +520,14 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
520 (ci->i_xattrs.index_version >= ci->i_xattrs.version)) { 520 (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
521 goto get_xattr; 521 goto get_xattr;
522 } else { 522 } else {
523 spin_unlock(&inode->i_lock); 523 spin_unlock(&ci->i_ceph_lock);
524 /* get xattrs from mds (if we don't already have them) */ 524 /* get xattrs from mds (if we don't already have them) */
525 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR); 525 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
526 if (err) 526 if (err)
527 return err; 527 return err;
528 } 528 }
529 529
530 spin_lock(&inode->i_lock); 530 spin_lock(&ci->i_ceph_lock);
531 531
532 if (vxattr && vxattr->readonly) { 532 if (vxattr && vxattr->readonly) {
533 err = vxattr->getxattr_cb(ci, value, size); 533 err = vxattr->getxattr_cb(ci, value, size);
@@ -558,7 +558,7 @@ get_xattr:
558 memcpy(value, xattr->val, xattr->val_len); 558 memcpy(value, xattr->val, xattr->val_len);
559 559
560out: 560out:
561 spin_unlock(&inode->i_lock); 561 spin_unlock(&ci->i_ceph_lock);
562 return err; 562 return err;
563} 563}
564 564
@@ -573,7 +573,7 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
573 u32 len; 573 u32 len;
574 int i; 574 int i;
575 575
576 spin_lock(&inode->i_lock); 576 spin_lock(&ci->i_ceph_lock);
577 dout("listxattr %p ver=%lld index_ver=%lld\n", inode, 577 dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
578 ci->i_xattrs.version, ci->i_xattrs.index_version); 578 ci->i_xattrs.version, ci->i_xattrs.index_version);
579 579
@@ -581,13 +581,13 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
581 (ci->i_xattrs.index_version >= ci->i_xattrs.version)) { 581 (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
582 goto list_xattr; 582 goto list_xattr;
583 } else { 583 } else {
584 spin_unlock(&inode->i_lock); 584 spin_unlock(&ci->i_ceph_lock);
585 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR); 585 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
586 if (err) 586 if (err)
587 return err; 587 return err;
588 } 588 }
589 589
590 spin_lock(&inode->i_lock); 590 spin_lock(&ci->i_ceph_lock);
591 591
592 err = __build_xattrs(inode); 592 err = __build_xattrs(inode);
593 if (err < 0) 593 if (err < 0)
@@ -619,7 +619,7 @@ list_xattr:
619 } 619 }
620 620
621out: 621out:
622 spin_unlock(&inode->i_lock); 622 spin_unlock(&ci->i_ceph_lock);
623 return err; 623 return err;
624} 624}
625 625
@@ -739,7 +739,7 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
739 if (!xattr) 739 if (!xattr)
740 goto out; 740 goto out;
741 741
742 spin_lock(&inode->i_lock); 742 spin_lock(&ci->i_ceph_lock);
743retry: 743retry:
744 issued = __ceph_caps_issued(ci, NULL); 744 issued = __ceph_caps_issued(ci, NULL);
745 if (!(issued & CEPH_CAP_XATTR_EXCL)) 745 if (!(issued & CEPH_CAP_XATTR_EXCL))
@@ -752,12 +752,12 @@ retry:
752 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) { 752 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
753 struct ceph_buffer *blob = NULL; 753 struct ceph_buffer *blob = NULL;
754 754
755 spin_unlock(&inode->i_lock); 755 spin_unlock(&ci->i_ceph_lock);
756 dout(" preaallocating new blob size=%d\n", required_blob_size); 756 dout(" preaallocating new blob size=%d\n", required_blob_size);
757 blob = ceph_buffer_new(required_blob_size, GFP_NOFS); 757 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
758 if (!blob) 758 if (!blob)
759 goto out; 759 goto out;
760 spin_lock(&inode->i_lock); 760 spin_lock(&ci->i_ceph_lock);
761 if (ci->i_xattrs.prealloc_blob) 761 if (ci->i_xattrs.prealloc_blob)
762 ceph_buffer_put(ci->i_xattrs.prealloc_blob); 762 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
763 ci->i_xattrs.prealloc_blob = blob; 763 ci->i_xattrs.prealloc_blob = blob;
@@ -770,13 +770,13 @@ retry:
770 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); 770 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
771 ci->i_xattrs.dirty = true; 771 ci->i_xattrs.dirty = true;
772 inode->i_ctime = CURRENT_TIME; 772 inode->i_ctime = CURRENT_TIME;
773 spin_unlock(&inode->i_lock); 773 spin_unlock(&ci->i_ceph_lock);
774 if (dirty) 774 if (dirty)
775 __mark_inode_dirty(inode, dirty); 775 __mark_inode_dirty(inode, dirty);
776 return err; 776 return err;
777 777
778do_sync: 778do_sync:
779 spin_unlock(&inode->i_lock); 779 spin_unlock(&ci->i_ceph_lock);
780 err = ceph_sync_setxattr(dentry, name, value, size, flags); 780 err = ceph_sync_setxattr(dentry, name, value, size, flags);
781out: 781out:
782 kfree(newname); 782 kfree(newname);
@@ -833,7 +833,7 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
833 return -EOPNOTSUPP; 833 return -EOPNOTSUPP;
834 } 834 }
835 835
836 spin_lock(&inode->i_lock); 836 spin_lock(&ci->i_ceph_lock);
837 __build_xattrs(inode); 837 __build_xattrs(inode);
838 issued = __ceph_caps_issued(ci, NULL); 838 issued = __ceph_caps_issued(ci, NULL);
839 dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued)); 839 dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
@@ -846,12 +846,12 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
846 ci->i_xattrs.dirty = true; 846 ci->i_xattrs.dirty = true;
847 inode->i_ctime = CURRENT_TIME; 847 inode->i_ctime = CURRENT_TIME;
848 848
849 spin_unlock(&inode->i_lock); 849 spin_unlock(&ci->i_ceph_lock);
850 if (dirty) 850 if (dirty)
851 __mark_inode_dirty(inode, dirty); 851 __mark_inode_dirty(inode, dirty);
852 return err; 852 return err;
853do_sync: 853do_sync:
854 spin_unlock(&inode->i_lock); 854 spin_unlock(&ci->i_ceph_lock);
855 err = ceph_send_removexattr(dentry, name); 855 err = ceph_send_removexattr(dentry, name);
856 return err; 856 return err;
857} 857}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d6a972df0338..8cd4b52d4217 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -441,6 +441,8 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
441 smb_msg.msg_controllen = 0; 441 smb_msg.msg_controllen = 0;
442 442
443 for (total_read = 0; to_read; total_read += length, to_read -= length) { 443 for (total_read = 0; to_read; total_read += length, to_read -= length) {
444 try_to_freeze();
445
444 if (server_unresponsive(server)) { 446 if (server_unresponsive(server)) {
445 total_read = -EAGAIN; 447 total_read = -EAGAIN;
446 break; 448 break;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index cf0b1539b321..4dd9283885e7 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -702,6 +702,13 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
702 lock->type, lock->netfid, conf_lock); 702 lock->type, lock->netfid, conf_lock);
703} 703}
704 704
705/*
706 * Check if there is another lock that prevents us to set the lock (mandatory
707 * style). If such a lock exists, update the flock structure with its
708 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
709 * or leave it the same if we can't. Returns 0 if we don't need to request to
710 * the server or 1 otherwise.
711 */
705static int 712static int
706cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length, 713cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
707 __u8 type, __u16 netfid, struct file_lock *flock) 714 __u8 type, __u16 netfid, struct file_lock *flock)
@@ -739,6 +746,12 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
739 mutex_unlock(&cinode->lock_mutex); 746 mutex_unlock(&cinode->lock_mutex);
740} 747}
741 748
749/*
750 * Set the byte-range lock (mandatory style). Returns:
751 * 1) 0, if we set the lock and don't need to request to the server;
752 * 2) 1, if no locks prevent us but we need to request to the server;
753 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
754 */
742static int 755static int
743cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock, 756cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
744 bool wait) 757 bool wait)
@@ -778,6 +791,13 @@ try_again:
778 return rc; 791 return rc;
779} 792}
780 793
794/*
795 * Check if there is another lock that prevents us to set the lock (posix
796 * style). If such a lock exists, update the flock structure with its
797 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
798 * or leave it the same if we can't. Returns 0 if we don't need to request to
799 * the server or 1 otherwise.
800 */
781static int 801static int
782cifs_posix_lock_test(struct file *file, struct file_lock *flock) 802cifs_posix_lock_test(struct file *file, struct file_lock *flock)
783{ 803{
@@ -800,6 +820,12 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
800 return rc; 820 return rc;
801} 821}
802 822
823/*
824 * Set the byte-range lock (posix style). Returns:
825 * 1) 0, if we set the lock and don't need to request to the server;
826 * 2) 1, if we need to request to the server;
827 * 3) <0, if the error occurs while setting the lock.
828 */
803static int 829static int
804cifs_posix_lock_set(struct file *file, struct file_lock *flock) 830cifs_posix_lock_set(struct file *file, struct file_lock *flock)
805{ 831{
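The comments added in the fs/cifs/file.c hunks above document one return convention for all four byte-range lock helpers: 0 means the request was satisfied locally, 1 means the caller still has to send it to the server, and a negative value is an error. A hedged caller sketch of that convention; send_lock_to_server() is a hypothetical placeholder, not a real CIFS function:

/* Hypothetical caller sketch for the convention documented above:
 * 0 -> handled locally, 1 -> forward to the server, <0 -> error. */
static int example_set_posix_lock(struct file *file, struct file_lock *flock)
{
        int rc = cifs_posix_lock_set(file, flock);

        if (rc <= 0)
                return rc;                       /* done locally, or error */
        return send_lock_to_server(file, flock); /* hypothetical helper */
}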
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 5de03ec20144..a090bbe6ee29 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -554,7 +554,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
554 rc); 554 rc);
555 return rc; 555 return rc;
556 } 556 }
557 cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); 557 /* FindFirst/Next set last_entry to NULL on malformed reply */
558 if (cifsFile->srch_inf.last_entry)
559 cifs_save_resume_key(cifsFile->srch_inf.last_entry,
560 cifsFile);
558 } 561 }
559 562
560 while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) && 563 while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
@@ -562,7 +565,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
562 cFYI(1, "calling findnext2"); 565 cFYI(1, "calling findnext2");
563 rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, 566 rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
564 &cifsFile->srch_inf); 567 &cifsFile->srch_inf);
565 cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); 568 /* FindFirst/Next set last_entry to NULL on malformed reply */
569 if (cifsFile->srch_inf.last_entry)
570 cifs_save_resume_key(cifsFile->srch_inf.last_entry,
571 cifsFile);
566 if (rc) 572 if (rc)
567 return -ENOENT; 573 return -ENOENT;
568 } 574 }
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 7cacba12b8f1..80d850881938 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -209,7 +209,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
209{ 209{
210 int rc; 210 int rc;
211 int len; 211 int len;
212 __u16 wpwd[129]; 212 __le16 wpwd[129];
213 213
214 /* Password cannot be longer than 128 characters */ 214 /* Password cannot be longer than 128 characters */
215 if (passwd) /* Password must be converted to NT unicode */ 215 if (passwd) /* Password must be converted to NT unicode */
@@ -219,8 +219,8 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
219 *wpwd = 0; /* Ensure string is null terminated */ 219 *wpwd = 0; /* Ensure string is null terminated */
220 } 220 }
221 221
222 rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__u16)); 222 rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
223 memset(wpwd, 0, 129 * sizeof(__u16)); 223 memset(wpwd, 0, 129 * sizeof(__le16));
224 224
225 return rc; 225 return rc;
226} 226}
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index ca418aaf6352..9d8715c45f25 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -292,7 +292,7 @@ int __init configfs_inode_init(void)
292 return bdi_init(&configfs_backing_dev_info); 292 return bdi_init(&configfs_backing_dev_info);
293} 293}
294 294
295void __exit configfs_inode_exit(void) 295void configfs_inode_exit(void)
296{ 296{
297 bdi_destroy(&configfs_backing_dev_info); 297 bdi_destroy(&configfs_backing_dev_info);
298} 298}
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index ecc62178beda..276e15cafd58 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -143,28 +143,26 @@ static int __init configfs_init(void)
143 goto out; 143 goto out;
144 144
145 config_kobj = kobject_create_and_add("config", kernel_kobj); 145 config_kobj = kobject_create_and_add("config", kernel_kobj);
146 if (!config_kobj) { 146 if (!config_kobj)
147 kmem_cache_destroy(configfs_dir_cachep); 147 goto out2;
148 configfs_dir_cachep = NULL; 148
149 goto out; 149 err = configfs_inode_init();
150 } 150 if (err)
151 goto out3;
151 152
152 err = register_filesystem(&configfs_fs_type); 153 err = register_filesystem(&configfs_fs_type);
153 if (err) { 154 if (err)
154 printk(KERN_ERR "configfs: Unable to register filesystem!\n"); 155 goto out4;
155 kobject_put(config_kobj);
156 kmem_cache_destroy(configfs_dir_cachep);
157 configfs_dir_cachep = NULL;
158 goto out;
159 }
160 156
161 err = configfs_inode_init(); 157 return 0;
162 if (err) { 158out4:
163 unregister_filesystem(&configfs_fs_type); 159 printk(KERN_ERR "configfs: Unable to register filesystem!\n");
164 kobject_put(config_kobj); 160 configfs_inode_exit();
165 kmem_cache_destroy(configfs_dir_cachep); 161out3:
166 configfs_dir_cachep = NULL; 162 kobject_put(config_kobj);
167 } 163out2:
164 kmem_cache_destroy(configfs_dir_cachep);
165 configfs_dir_cachep = NULL;
168out: 166out:
169 return err; 167 return err;
170} 168}
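The configfs_init() rework above replaces per-branch cleanup (each error path repeating kobject_put() and kmem_cache_destroy()) with a single unwind ladder of goto labels, the usual kernel error-handling idiom: each label undoes exactly the steps that had succeeded before the failure, in reverse order. A minimal standalone sketch of the idiom; the step_*()/undo_*() functions are hypothetical placeholders:

/* Sketch of the goto-unwind idiom; all functions here are hypothetical. */
static int example_init(void)
{
        int err;

        err = step_a();
        if (err)
                goto out;
        err = step_b();
        if (err)
                goto undo_a;
        err = step_c();
        if (err)
                goto undo_b;
        return 0;

undo_b:
        undo_step_b();
undo_a:
        undo_step_a();
out:
        return err;
}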
diff --git a/fs/dcache.c b/fs/dcache.c
index a901c6901bce..89509b5a090e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -36,6 +36,7 @@
36#include <linux/bit_spinlock.h> 36#include <linux/bit_spinlock.h>
37#include <linux/rculist_bl.h> 37#include <linux/rculist_bl.h>
38#include <linux/prefetch.h> 38#include <linux/prefetch.h>
39#include <linux/ratelimit.h>
39#include "internal.h" 40#include "internal.h"
40 41
41/* 42/*
@@ -2383,8 +2384,16 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2383 actual = __d_unalias(inode, dentry, alias); 2384 actual = __d_unalias(inode, dentry, alias);
2384 } 2385 }
2385 write_sequnlock(&rename_lock); 2386 write_sequnlock(&rename_lock);
2386 if (IS_ERR(actual)) 2387 if (IS_ERR(actual)) {
2388 if (PTR_ERR(actual) == -ELOOP)
2389 pr_warn_ratelimited(
2390 "VFS: Lookup of '%s' in %s %s"
2391 " would have caused loop\n",
2392 dentry->d_name.name,
2393 inode->i_sb->s_type->name,
2394 inode->i_sb->s_id);
2387 dput(alias); 2395 dput(alias);
2396 }
2388 goto out_nolock; 2397 goto out_nolock;
2389 } 2398 }
2390 } 2399 }
@@ -2430,16 +2439,14 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2430/** 2439/**
2431 * prepend_path - Prepend path string to a buffer 2440 * prepend_path - Prepend path string to a buffer
2432 * @path: the dentry/vfsmount to report 2441 * @path: the dentry/vfsmount to report
2433 * @root: root vfsmnt/dentry (may be modified by this function) 2442 * @root: root vfsmnt/dentry
2434 * @buffer: pointer to the end of the buffer 2443 * @buffer: pointer to the end of the buffer
2435 * @buflen: pointer to buffer length 2444 * @buflen: pointer to buffer length
2436 * 2445 *
2437 * Caller holds the rename_lock. 2446 * Caller holds the rename_lock.
2438 *
2439 * If path is not reachable from the supplied root, then the value of
2440 * root is changed (without modifying refcounts).
2441 */ 2447 */
2442static int prepend_path(const struct path *path, struct path *root, 2448static int prepend_path(const struct path *path,
2449 const struct path *root,
2443 char **buffer, int *buflen) 2450 char **buffer, int *buflen)
2444{ 2451{
2445 struct dentry *dentry = path->dentry; 2452 struct dentry *dentry = path->dentry;
@@ -2474,10 +2481,10 @@ static int prepend_path(const struct path *path, struct path *root,
2474 dentry = parent; 2481 dentry = parent;
2475 } 2482 }
2476 2483
2477out:
2478 if (!error && !slash) 2484 if (!error && !slash)
2479 error = prepend(buffer, buflen, "/", 1); 2485 error = prepend(buffer, buflen, "/", 1);
2480 2486
2487out:
2481 br_read_unlock(vfsmount_lock); 2488 br_read_unlock(vfsmount_lock);
2482 return error; 2489 return error;
2483 2490
@@ -2491,15 +2498,17 @@ global_root:
2491 WARN(1, "Root dentry has weird name <%.*s>\n", 2498 WARN(1, "Root dentry has weird name <%.*s>\n",
2492 (int) dentry->d_name.len, dentry->d_name.name); 2499 (int) dentry->d_name.len, dentry->d_name.name);
2493 } 2500 }
2494 root->mnt = vfsmnt; 2501 if (!slash)
2495 root->dentry = dentry; 2502 error = prepend(buffer, buflen, "/", 1);
2503 if (!error)
2504 error = vfsmnt->mnt_ns ? 1 : 2;
2496 goto out; 2505 goto out;
2497} 2506}
2498 2507
2499/** 2508/**
2500 * __d_path - return the path of a dentry 2509 * __d_path - return the path of a dentry
2501 * @path: the dentry/vfsmount to report 2510 * @path: the dentry/vfsmount to report
2502 * @root: root vfsmnt/dentry (may be modified by this function) 2511 * @root: root vfsmnt/dentry
2503 * @buf: buffer to return value in 2512 * @buf: buffer to return value in
2504 * @buflen: buffer length 2513 * @buflen: buffer length
2505 * 2514 *
@@ -2510,10 +2519,10 @@ global_root:
2510 * 2519 *
2511 * "buflen" should be positive. 2520 * "buflen" should be positive.
2512 * 2521 *
2513 * If path is not reachable from the supplied root, then the value of 2522 * If the path is not reachable from the supplied root, return %NULL.
2514 * root is changed (without modifying refcounts).
2515 */ 2523 */
2516char *__d_path(const struct path *path, struct path *root, 2524char *__d_path(const struct path *path,
2525 const struct path *root,
2517 char *buf, int buflen) 2526 char *buf, int buflen)
2518{ 2527{
2519 char *res = buf + buflen; 2528 char *res = buf + buflen;
@@ -2524,7 +2533,28 @@ char *__d_path(const struct path *path, struct path *root,
2524 error = prepend_path(path, root, &res, &buflen); 2533 error = prepend_path(path, root, &res, &buflen);
2525 write_sequnlock(&rename_lock); 2534 write_sequnlock(&rename_lock);
2526 2535
2527 if (error) 2536 if (error < 0)
2537 return ERR_PTR(error);
2538 if (error > 0)
2539 return NULL;
2540 return res;
2541}
2542
2543char *d_absolute_path(const struct path *path,
2544 char *buf, int buflen)
2545{
2546 struct path root = {};
2547 char *res = buf + buflen;
2548 int error;
2549
2550 prepend(&res, &buflen, "\0", 1);
2551 write_seqlock(&rename_lock);
2552 error = prepend_path(path, &root, &res, &buflen);
2553 write_sequnlock(&rename_lock);
2554
2555 if (error > 1)
2556 error = -EINVAL;
2557 if (error < 0)
2528 return ERR_PTR(error); 2558 return ERR_PTR(error);
2529 return res; 2559 return res;
2530} 2560}
@@ -2532,8 +2562,9 @@ char *__d_path(const struct path *path, struct path *root,
2532/* 2562/*
2533 * same as __d_path but appends "(deleted)" for unlinked files. 2563 * same as __d_path but appends "(deleted)" for unlinked files.
2534 */ 2564 */
2535static int path_with_deleted(const struct path *path, struct path *root, 2565static int path_with_deleted(const struct path *path,
2536 char **buf, int *buflen) 2566 const struct path *root,
2567 char **buf, int *buflen)
2537{ 2568{
2538 prepend(buf, buflen, "\0", 1); 2569 prepend(buf, buflen, "\0", 1);
2539 if (d_unlinked(path->dentry)) { 2570 if (d_unlinked(path->dentry)) {
@@ -2570,7 +2601,6 @@ char *d_path(const struct path *path, char *buf, int buflen)
2570{ 2601{
2571 char *res = buf + buflen; 2602 char *res = buf + buflen;
2572 struct path root; 2603 struct path root;
2573 struct path tmp;
2574 int error; 2604 int error;
2575 2605
2576 /* 2606 /*
@@ -2585,9 +2615,8 @@ char *d_path(const struct path *path, char *buf, int buflen)
2585 2615
2586 get_fs_root(current->fs, &root); 2616 get_fs_root(current->fs, &root);
2587 write_seqlock(&rename_lock); 2617 write_seqlock(&rename_lock);
2588 tmp = root; 2618 error = path_with_deleted(path, &root, &res, &buflen);
2589 error = path_with_deleted(path, &tmp, &res, &buflen); 2619 if (error < 0)
2590 if (error)
2591 res = ERR_PTR(error); 2620 res = ERR_PTR(error);
2592 write_sequnlock(&rename_lock); 2621 write_sequnlock(&rename_lock);
2593 path_put(&root); 2622 path_put(&root);
@@ -2608,7 +2637,6 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
2608{ 2637{
2609 char *res = buf + buflen; 2638 char *res = buf + buflen;
2610 struct path root; 2639 struct path root;
2611 struct path tmp;
2612 int error; 2640 int error;
2613 2641
2614 if (path->dentry->d_op && path->dentry->d_op->d_dname) 2642 if (path->dentry->d_op && path->dentry->d_op->d_dname)
@@ -2616,9 +2644,8 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
2616 2644
2617 get_fs_root(current->fs, &root); 2645 get_fs_root(current->fs, &root);
2618 write_seqlock(&rename_lock); 2646 write_seqlock(&rename_lock);
2619 tmp = root; 2647 error = path_with_deleted(path, &root, &res, &buflen);
2620 error = path_with_deleted(path, &tmp, &res, &buflen); 2648 if (error > 0)
2621 if (!error && !path_equal(&tmp, &root))
2622 error = prepend_unreachable(&res, &buflen); 2649 error = prepend_unreachable(&res, &buflen);
2623 write_sequnlock(&rename_lock); 2650 write_sequnlock(&rename_lock);
2624 path_put(&root); 2651 path_put(&root);
@@ -2749,19 +2776,18 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
2749 write_seqlock(&rename_lock); 2776 write_seqlock(&rename_lock);
2750 if (!d_unlinked(pwd.dentry)) { 2777 if (!d_unlinked(pwd.dentry)) {
2751 unsigned long len; 2778 unsigned long len;
2752 struct path tmp = root;
2753 char *cwd = page + PAGE_SIZE; 2779 char *cwd = page + PAGE_SIZE;
2754 int buflen = PAGE_SIZE; 2780 int buflen = PAGE_SIZE;
2755 2781
2756 prepend(&cwd, &buflen, "\0", 1); 2782 prepend(&cwd, &buflen, "\0", 1);
2757 error = prepend_path(&pwd, &tmp, &cwd, &buflen); 2783 error = prepend_path(&pwd, &root, &cwd, &buflen);
2758 write_sequnlock(&rename_lock); 2784 write_sequnlock(&rename_lock);
2759 2785
2760 if (error) 2786 if (error < 0)
2761 goto out; 2787 goto out;
2762 2788
2763 /* Unreachable from current root */ 2789 /* Unreachable from current root */
2764 if (!path_equal(&tmp, &root)) { 2790 if (error > 0) {
2765 error = prepend_unreachable(&cwd, &buflen); 2791 error = prepend_unreachable(&cwd, &buflen);
2766 if (error) 2792 if (error)
2767 goto out; 2793 goto out;
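In the fs/dcache.c hunks above, prepend_path() now takes a const root and reports reachability through its return value instead of rewriting the caller's root: a negative value is an error, 0 means the path was rendered relative to the given root, and a positive value means the dentry is not reachable from that root. __d_path() accordingly returns NULL for the unreachable case, and d_absolute_path() is added for callers that want the path independent of the current root. A hedged usage sketch of the new helper; the caller and its seq_file output are illustrative only, not taken from the patch:

/* Sketch: render an absolute path with the d_absolute_path() helper
 * added above (illustrative caller, not taken from the patch). */
static int example_show_path(struct seq_file *m, const struct path *path)
{
        char *buf, *p;
        int err = 0;

        buf = (char *)__get_free_page(GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        p = d_absolute_path(path, buf, PAGE_SIZE);
        if (IS_ERR(p))
                err = PTR_ERR(p);
        else
                seq_puts(m, p);

        free_page((unsigned long)buf);
        return err;
}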
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 58609bde3b9f..2a834255c75d 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -967,7 +967,7 @@ static void ecryptfs_set_default_crypt_stat_vals(
967 967
968/** 968/**
969 * ecryptfs_new_file_context 969 * ecryptfs_new_file_context
970 * @ecryptfs_dentry: The eCryptfs dentry 970 * @ecryptfs_inode: The eCryptfs inode
971 * 971 *
972 * If the crypto context for the file has not yet been established, 972 * If the crypto context for the file has not yet been established,
973 * this is where we do that. Establishing a new crypto context 973 * this is where we do that. Establishing a new crypto context
@@ -984,13 +984,13 @@ static void ecryptfs_set_default_crypt_stat_vals(
984 * 984 *
985 * Returns zero on success; non-zero otherwise 985 * Returns zero on success; non-zero otherwise
986 */ 986 */
987int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry) 987int ecryptfs_new_file_context(struct inode *ecryptfs_inode)
988{ 988{
989 struct ecryptfs_crypt_stat *crypt_stat = 989 struct ecryptfs_crypt_stat *crypt_stat =
990 &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; 990 &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
991 struct ecryptfs_mount_crypt_stat *mount_crypt_stat = 991 struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
992 &ecryptfs_superblock_to_private( 992 &ecryptfs_superblock_to_private(
993 ecryptfs_dentry->d_sb)->mount_crypt_stat; 993 ecryptfs_inode->i_sb)->mount_crypt_stat;
994 int cipher_name_len; 994 int cipher_name_len;
995 int rc = 0; 995 int rc = 0;
996 996
@@ -1299,12 +1299,12 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max,
1299} 1299}
1300 1300
1301static int 1301static int
1302ecryptfs_write_metadata_to_contents(struct dentry *ecryptfs_dentry, 1302ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode,
1303 char *virt, size_t virt_len) 1303 char *virt, size_t virt_len)
1304{ 1304{
1305 int rc; 1305 int rc;
1306 1306
1307 rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt, 1307 rc = ecryptfs_write_lower(ecryptfs_inode, virt,
1308 0, virt_len); 1308 0, virt_len);
1309 if (rc < 0) 1309 if (rc < 0)
1310 printk(KERN_ERR "%s: Error attempting to write header " 1310 printk(KERN_ERR "%s: Error attempting to write header "
@@ -1338,7 +1338,8 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
1338 1338
1339/** 1339/**
1340 * ecryptfs_write_metadata 1340 * ecryptfs_write_metadata
1341 * @ecryptfs_dentry: The eCryptfs dentry 1341 * @ecryptfs_dentry: The eCryptfs dentry, which should be negative
1342 * @ecryptfs_inode: The newly created eCryptfs inode
1342 * 1343 *
1343 * Write the file headers out. This will likely involve a userspace 1344 * Write the file headers out. This will likely involve a userspace
1344 * callout, in which the session key is encrypted with one or more 1345 * callout, in which the session key is encrypted with one or more
@@ -1348,10 +1349,11 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
1348 * 1349 *
1349 * Returns zero on success; non-zero on error 1350 * Returns zero on success; non-zero on error
1350 */ 1351 */
1351int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) 1352int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
1353 struct inode *ecryptfs_inode)
1352{ 1354{
1353 struct ecryptfs_crypt_stat *crypt_stat = 1355 struct ecryptfs_crypt_stat *crypt_stat =
1354 &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; 1356 &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
1355 unsigned int order; 1357 unsigned int order;
1356 char *virt; 1358 char *virt;
1357 size_t virt_len; 1359 size_t virt_len;
@@ -1391,7 +1393,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
1391 rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt, 1393 rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt,
1392 size); 1394 size);
1393 else 1395 else
1394 rc = ecryptfs_write_metadata_to_contents(ecryptfs_dentry, virt, 1396 rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt,
1395 virt_len); 1397 virt_len);
1396 if (rc) { 1398 if (rc) {
1397 printk(KERN_ERR "%s: Error writing metadata out to lower file; " 1399 printk(KERN_ERR "%s: Error writing metadata out to lower file; "
@@ -1943,7 +1945,7 @@ static unsigned char *portable_filename_chars = ("-.0123456789ABCD"
1943 1945
1944/* We could either offset on every reverse map or just pad some 0x00's 1946/* We could either offset on every reverse map or just pad some 0x00's
1945 * at the front here */ 1947 * at the front here */
1946static const unsigned char filename_rev_map[] = { 1948static const unsigned char filename_rev_map[256] = {
1947 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */ 1949 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */
1948 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */ 1950 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */
1949 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */ 1951 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */
@@ -1959,7 +1961,7 @@ static const unsigned char filename_rev_map[] = {
1959 0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */ 1961 0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */
1960 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */ 1962 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */
1961 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */ 1963 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */
1962 0x3D, 0x3E, 0x3F 1964 0x3D, 0x3E, 0x3F /* 123 - 255 initialized to 0x00 */
1963}; 1965};
1964 1966
1965/** 1967/**
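
The last hunk above sizes filename_rev_map explicitly as [256], so indexing it with any unsigned byte lands on a real (zero-initialized) entry instead of reading past the end of the array. A small userspace sketch of that defensive table layout follows, assuming a plain base64 alphabet rather than eCryptfs's portable filename character set.

/*
 * Sketch of the "full 256-entry reverse map" idea: any input byte is a
 * safe index; unlisted bytes simply decode to 0. Alphabet and input are
 * example values only.
 */
#include <stdio.h>

static const char fwd_map[64] =
	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

static unsigned char rev_map[256];	/* unlisted bytes decode to 0 */

static void build_rev_map(void)
{
	int i;

	for (i = 0; i < 64; i++)
		rev_map[(unsigned char)fwd_map[i]] = (unsigned char)i;
}

int main(void)
{
	const unsigned char input[] = "Zm9v\xff";	/* includes a stray high byte */
	int i;

	build_rev_map();
	for (i = 0; input[i]; i++)
		/* even 0xff is a valid index into the 256-entry table */
		printf("0x%02x -> %u\n", (unsigned)input[i],
		       (unsigned)rev_map[input[i]]);
	return 0;
}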
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 54481a3b2c79..a9f29b12fbf2 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -584,9 +584,10 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat);
584int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode); 584int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode);
585int ecryptfs_encrypt_page(struct page *page); 585int ecryptfs_encrypt_page(struct page *page);
586int ecryptfs_decrypt_page(struct page *page); 586int ecryptfs_decrypt_page(struct page *page);
587int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry); 587int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
588 struct inode *ecryptfs_inode);
588int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry); 589int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry);
589int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry); 590int ecryptfs_new_file_context(struct inode *ecryptfs_inode);
590void ecryptfs_write_crypt_stat_flags(char *page_virt, 591void ecryptfs_write_crypt_stat_flags(char *page_virt,
591 struct ecryptfs_crypt_stat *crypt_stat, 592 struct ecryptfs_crypt_stat *crypt_stat,
592 size_t *written); 593 size_t *written);
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index c6ac98cf9baa..d3f95f941c47 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -139,6 +139,27 @@ out:
139 return rc; 139 return rc;
140} 140}
141 141
142static void ecryptfs_vma_close(struct vm_area_struct *vma)
143{
144 filemap_write_and_wait(vma->vm_file->f_mapping);
145}
146
147static const struct vm_operations_struct ecryptfs_file_vm_ops = {
148 .close = ecryptfs_vma_close,
149 .fault = filemap_fault,
150};
151
152static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
153{
154 int rc;
155
156 rc = generic_file_mmap(file, vma);
157 if (!rc)
158 vma->vm_ops = &ecryptfs_file_vm_ops;
159
160 return rc;
161}
162
142struct kmem_cache *ecryptfs_file_info_cache; 163struct kmem_cache *ecryptfs_file_info_cache;
143 164
144/** 165/**
@@ -349,7 +370,7 @@ const struct file_operations ecryptfs_main_fops = {
349#ifdef CONFIG_COMPAT 370#ifdef CONFIG_COMPAT
350 .compat_ioctl = ecryptfs_compat_ioctl, 371 .compat_ioctl = ecryptfs_compat_ioctl,
351#endif 372#endif
352 .mmap = generic_file_mmap, 373 .mmap = ecryptfs_file_mmap,
353 .open = ecryptfs_open, 374 .open = ecryptfs_open,
354 .flush = ecryptfs_flush, 375 .flush = ecryptfs_flush,
355 .release = ecryptfs_release, 376 .release = ecryptfs_release,
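
The new ecryptfs_file_mmap()/ecryptfs_vma_close() pair above makes sure data written through a shared mapping is pushed to the lower file when the mapping is torn down. As a rough userspace analogy only (not the kernel mechanism), the sketch below flushes mmap'ed writes with msync() before munmap(); the file name and sizes are invented for the example.

/*
 * Userspace analogy: writes made through a MAP_SHARED mapping are not
 * guaranteed durable until flushed; msync() before munmap() plays the
 * role that the new .close hook's filemap_write_and_wait() plays above.
 * Error handling is kept minimal on purpose.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "hello via mmap\n";
	int fd = open("/tmp/mmap-demo.txt", O_RDWR | O_CREAT | O_TRUNC, 0644);
	char *p;

	if (fd < 0)
		return 1;
	if (ftruncate(fd, sizeof(msg)) < 0)
		return 1;
	p = mmap(NULL, sizeof(msg), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	memcpy(p, msg, sizeof(msg));		/* "write" through the mapping */
	msync(p, sizeof(msg), MS_SYNC);		/* flush before tearing down */
	munmap(p, sizeof(msg));
	close(fd);
	return 0;
}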
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index a36d327f1521..32f90a3ae63e 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -172,22 +172,23 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
172 * it. It will also update the eCryptfs directory inode to mimic the 172 * it. It will also update the eCryptfs directory inode to mimic the
173 * stat of the lower directory inode. 173 * stat of the lower directory inode.
174 * 174 *
175 * Returns zero on success; non-zero on error condition 175 * Returns the new eCryptfs inode on success; an ERR_PTR on error condition
176 */ 176 */
177static int 177static struct inode *
178ecryptfs_do_create(struct inode *directory_inode, 178ecryptfs_do_create(struct inode *directory_inode,
179 struct dentry *ecryptfs_dentry, int mode) 179 struct dentry *ecryptfs_dentry, int mode)
180{ 180{
181 int rc; 181 int rc;
182 struct dentry *lower_dentry; 182 struct dentry *lower_dentry;
183 struct dentry *lower_dir_dentry; 183 struct dentry *lower_dir_dentry;
184 struct inode *inode;
184 185
185 lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); 186 lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
186 lower_dir_dentry = lock_parent(lower_dentry); 187 lower_dir_dentry = lock_parent(lower_dentry);
187 if (IS_ERR(lower_dir_dentry)) { 188 if (IS_ERR(lower_dir_dentry)) {
188 ecryptfs_printk(KERN_ERR, "Error locking directory of " 189 ecryptfs_printk(KERN_ERR, "Error locking directory of "
189 "dentry\n"); 190 "dentry\n");
190 rc = PTR_ERR(lower_dir_dentry); 191 inode = ERR_CAST(lower_dir_dentry);
191 goto out; 192 goto out;
192 } 193 }
193 rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode, 194 rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode,
@@ -195,20 +196,19 @@ ecryptfs_do_create(struct inode *directory_inode,
195 if (rc) { 196 if (rc) {
196 printk(KERN_ERR "%s: Failure to create dentry in lower fs; " 197 printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
197 "rc = [%d]\n", __func__, rc); 198 "rc = [%d]\n", __func__, rc);
199 inode = ERR_PTR(rc);
198 goto out_lock; 200 goto out_lock;
199 } 201 }
200 rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry, 202 inode = __ecryptfs_get_inode(lower_dentry->d_inode,
201 directory_inode->i_sb); 203 directory_inode->i_sb);
202 if (rc) { 204 if (IS_ERR(inode))
203 ecryptfs_printk(KERN_ERR, "Failure in ecryptfs_interpose\n");
204 goto out_lock; 205 goto out_lock;
205 }
206 fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode); 206 fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
207 fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode); 207 fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
208out_lock: 208out_lock:
209 unlock_dir(lower_dir_dentry); 209 unlock_dir(lower_dir_dentry);
210out: 210out:
211 return rc; 211 return inode;
212} 212}
213 213
214/** 214/**
@@ -219,26 +219,26 @@ out:
219 * 219 *
220 * Returns zero on success 220 * Returns zero on success
221 */ 221 */
222static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry) 222static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
223 struct inode *ecryptfs_inode)
223{ 224{
224 struct ecryptfs_crypt_stat *crypt_stat = 225 struct ecryptfs_crypt_stat *crypt_stat =
225 &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; 226 &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
226 int rc = 0; 227 int rc = 0;
227 228
228 if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { 229 if (S_ISDIR(ecryptfs_inode->i_mode)) {
229 ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); 230 ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
230 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); 231 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
231 goto out; 232 goto out;
232 } 233 }
233 ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n"); 234 ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n");
234 rc = ecryptfs_new_file_context(ecryptfs_dentry); 235 rc = ecryptfs_new_file_context(ecryptfs_inode);
235 if (rc) { 236 if (rc) {
236 ecryptfs_printk(KERN_ERR, "Error creating new file " 237 ecryptfs_printk(KERN_ERR, "Error creating new file "
237 "context; rc = [%d]\n", rc); 238 "context; rc = [%d]\n", rc);
238 goto out; 239 goto out;
239 } 240 }
240 rc = ecryptfs_get_lower_file(ecryptfs_dentry, 241 rc = ecryptfs_get_lower_file(ecryptfs_dentry, ecryptfs_inode);
241 ecryptfs_dentry->d_inode);
242 if (rc) { 242 if (rc) {
243 printk(KERN_ERR "%s: Error attempting to initialize " 243 printk(KERN_ERR "%s: Error attempting to initialize "
244 "the lower file for the dentry with name " 244 "the lower file for the dentry with name "
@@ -246,10 +246,10 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
246 ecryptfs_dentry->d_name.name, rc); 246 ecryptfs_dentry->d_name.name, rc);
247 goto out; 247 goto out;
248 } 248 }
249 rc = ecryptfs_write_metadata(ecryptfs_dentry); 249 rc = ecryptfs_write_metadata(ecryptfs_dentry, ecryptfs_inode);
250 if (rc) 250 if (rc)
251 printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc); 251 printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
252 ecryptfs_put_lower_file(ecryptfs_dentry->d_inode); 252 ecryptfs_put_lower_file(ecryptfs_inode);
253out: 253out:
254 return rc; 254 return rc;
255} 255}
@@ -269,18 +269,28 @@ static int
269ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, 269ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
270 int mode, struct nameidata *nd) 270 int mode, struct nameidata *nd)
271{ 271{
272 struct inode *ecryptfs_inode;
272 int rc; 273 int rc;
273 274
274 /* ecryptfs_do_create() calls ecryptfs_interpose() */ 275 ecryptfs_inode = ecryptfs_do_create(directory_inode, ecryptfs_dentry,
275 rc = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode); 276 mode);
276 if (unlikely(rc)) { 277 if (unlikely(IS_ERR(ecryptfs_inode))) {
277 ecryptfs_printk(KERN_WARNING, "Failed to create file in" 278 ecryptfs_printk(KERN_WARNING, "Failed to create file in"
278 "lower filesystem\n"); 279 "lower filesystem\n");
280 rc = PTR_ERR(ecryptfs_inode);
279 goto out; 281 goto out;
280 } 282 }
281 /* At this point, a file exists on "disk"; we need to make sure 283 /* At this point, a file exists on "disk"; we need to make sure
282 * that this on disk file is prepared to be an ecryptfs file */ 284 * that this on disk file is prepared to be an ecryptfs file */
283 rc = ecryptfs_initialize_file(ecryptfs_dentry); 285 rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
286 if (rc) {
287 drop_nlink(ecryptfs_inode);
288 unlock_new_inode(ecryptfs_inode);
289 iput(ecryptfs_inode);
290 goto out;
291 }
292 d_instantiate(ecryptfs_dentry, ecryptfs_inode);
293 unlock_new_inode(ecryptfs_inode);
284out: 294out:
285 return rc; 295 return rc;
286} 296}
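
The reworked ecryptfs_do_create() above returns either a valid inode pointer or an ERR_PTR-encoded errno, and ecryptfs_create() checks it with IS_ERR()/PTR_ERR() before instantiating the dentry. The sketch below re-implements that pointer-encoding convention in userspace purely for illustration; the real helpers live in <linux/err.h>, and struct object is made up.

/*
 * Sketch of the ERR_PTR/IS_ERR/PTR_ERR convention: a function that
 * normally returns a pointer encodes a small negative errno in the
 * pointer value instead of returning NULL plus a side-channel error.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct object { int id; };

static struct object *create_object(int fail)
{
	struct object *obj;

	if (fail)
		return ERR_PTR(-ENOMEM);	/* error travels in the pointer */
	obj = malloc(sizeof(*obj));
	if (!obj)
		return ERR_PTR(-ENOMEM);
	obj->id = 42;
	return obj;
}

int main(void)
{
	struct object *obj = create_object(1);

	if (IS_ERR(obj)) {
		printf("create failed: %ld\n", PTR_ERR(obj));
		obj = create_object(0);
	}
	if (!IS_ERR(obj)) {
		printf("created object %d\n", obj->id);
		free(obj);
	}
	return 0;
}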
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index f6dba4505f1c..12ccacda44e0 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -565,7 +565,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
 	brelse(bitmap_bh);
 	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
 		", computed = %llu, %llu\n",
-	       EXT4_B2C(sbi, ext4_free_blocks_count(es)),
+	       EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
 	       desc_count, bitmap_count);
 	return bitmap_count;
 #else
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 61fa9e1614af..607b1557d292 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1095,7 +1095,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1095 le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), 1095 le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
1096 ext4_idx_pblock(EXT_FIRST_INDEX(neh))); 1096 ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
1097 1097
1098 neh->eh_depth = cpu_to_le16(neh->eh_depth + 1); 1098 neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
1099 ext4_mark_inode_dirty(handle, inode); 1099 ext4_mark_inode_dirty(handle, inode);
1100out: 1100out:
1101 brelse(bh); 1101 brelse(bh);
@@ -2955,7 +2955,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2955 /* Pre-conditions */ 2955 /* Pre-conditions */
2956 BUG_ON(!ext4_ext_is_uninitialized(ex)); 2956 BUG_ON(!ext4_ext_is_uninitialized(ex));
2957 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 2957 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
2958 BUG_ON(map->m_lblk + map->m_len > ee_block + ee_len);
2959 2958
2960 /* 2959 /*
2961 * Attempt to transfer newly initialized blocks from the currently 2960 * Attempt to transfer newly initialized blocks from the currently
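
The one-line eh_depth fix above matters because on-disk ext4 fields are little-endian: the old code added 1 to the raw __le16 value, which only happens to be correct on little-endian CPUs. Below is a hedged userspace sketch of the convert-modify-convert pattern, with simplified stand-ins for cpu_to_le16()/le16_to_cpu().

/*
 * Simplified userspace stand-ins for the kernel's le16 helpers, used to
 * show why arithmetic must happen in CPU byte order. Values are examples.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t cpu_to_le16(uint16_t v)
{
	uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };
	uint16_t out;

	memcpy(&out, b, 2);	/* store the two bytes in little-endian order */
	return out;
}

static uint16_t le16_to_cpu(uint16_t v)
{
	uint8_t b[2];

	memcpy(b, &v, 2);
	return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
	uint16_t on_disk = cpu_to_le16(3);	/* depth 3, stored little-endian */

	/* correct: convert to CPU order, add, convert back */
	on_disk = cpu_to_le16(le16_to_cpu(on_disk) + 1);
	printf("depth is now %u\n", le16_to_cpu(on_disk));

	/*
	 * The removed pattern, cpu_to_le16(raw + 1), treats the raw on-disk
	 * value as a CPU value; that is a no-op mistake on little-endian
	 * hosts but corrupts the field on big-endian ones.
	 */
	return 0;
}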
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 240f6e2dc7ee..92655fd89657 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1339,8 +1339,11 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
1339 clear_buffer_unwritten(bh); 1339 clear_buffer_unwritten(bh);
1340 } 1340 }
1341 1341
1342 /* skip page if block allocation undone */ 1342 /*
1343 if (buffer_delay(bh) || buffer_unwritten(bh)) 1343 * skip page if block allocation undone and
1344 * block is dirty
1345 */
1346 if (ext4_bh_delay_or_unwritten(NULL, bh))
1344 skip_page = 1; 1347 skip_page = 1;
1345 bh = bh->b_this_page; 1348 bh = bh->b_this_page;
1346 block_start += bh->b_size; 1349 block_start += bh->b_size;
@@ -2270,6 +2273,7 @@ retry:
2270 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2273 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2271 "%ld pages, ino %lu; err %d", __func__, 2274 "%ld pages, ino %lu; err %d", __func__,
2272 wbc->nr_to_write, inode->i_ino, ret); 2275 wbc->nr_to_write, inode->i_ino, ret);
2276 blk_finish_plug(&plug);
2273 goto out_writepages; 2277 goto out_writepages;
2274 } 2278 }
2275 2279
@@ -2386,7 +2390,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2386 pgoff_t index; 2390 pgoff_t index;
2387 struct inode *inode = mapping->host; 2391 struct inode *inode = mapping->host;
2388 handle_t *handle; 2392 handle_t *handle;
2389 loff_t page_len;
2390 2393
2391 index = pos >> PAGE_CACHE_SHIFT; 2394 index = pos >> PAGE_CACHE_SHIFT;
2392 2395
@@ -2433,13 +2436,6 @@ retry:
2433 */ 2436 */
2434 if (pos + len > inode->i_size) 2437 if (pos + len > inode->i_size)
2435 ext4_truncate_failed_write(inode); 2438 ext4_truncate_failed_write(inode);
2436 } else {
2437 page_len = pos & (PAGE_CACHE_SIZE - 1);
2438 if (page_len > 0) {
2439 ret = ext4_discard_partial_page_buffers_no_lock(handle,
2440 inode, page, pos - page_len, page_len,
2441 EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
2442 }
2443 } 2439 }
2444 2440
2445 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 2441 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -2482,7 +2478,6 @@ static int ext4_da_write_end(struct file *file,
2482 loff_t new_i_size; 2478 loff_t new_i_size;
2483 unsigned long start, end; 2479 unsigned long start, end;
2484 int write_mode = (int)(unsigned long)fsdata; 2480 int write_mode = (int)(unsigned long)fsdata;
2485 loff_t page_len;
2486 2481
2487 if (write_mode == FALL_BACK_TO_NONDELALLOC) { 2482 if (write_mode == FALL_BACK_TO_NONDELALLOC) {
2488 if (ext4_should_order_data(inode)) { 2483 if (ext4_should_order_data(inode)) {
@@ -2507,7 +2502,7 @@ static int ext4_da_write_end(struct file *file,
2507 */ 2502 */
2508 2503
2509 new_i_size = pos + copied; 2504 new_i_size = pos + copied;
2510 if (new_i_size > EXT4_I(inode)->i_disksize) { 2505 if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2511 if (ext4_da_should_update_i_disksize(page, end)) { 2506 if (ext4_da_should_update_i_disksize(page, end)) {
2512 down_write(&EXT4_I(inode)->i_data_sem); 2507 down_write(&EXT4_I(inode)->i_data_sem);
2513 if (new_i_size > EXT4_I(inode)->i_disksize) { 2508 if (new_i_size > EXT4_I(inode)->i_disksize) {
@@ -2531,16 +2526,6 @@ static int ext4_da_write_end(struct file *file,
2531 } 2526 }
2532 ret2 = generic_write_end(file, mapping, pos, len, copied, 2527 ret2 = generic_write_end(file, mapping, pos, len, copied,
2533 page, fsdata); 2528 page, fsdata);
2534
2535 page_len = PAGE_CACHE_SIZE -
2536 ((pos + copied - 1) & (PAGE_CACHE_SIZE - 1));
2537
2538 if (page_len > 0) {
2539 ret = ext4_discard_partial_page_buffers_no_lock(handle,
2540 inode, page, pos + copied - 1, page_len,
2541 EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
2542 }
2543
2544 copied = ret2; 2529 copied = ret2;
2545 if (ret2 < 0) 2530 if (ret2 < 0)
2546 ret = ret2; 2531 ret = ret2;
@@ -2780,10 +2765,11 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
2780 iocb->private, io_end->inode->i_ino, iocb, offset, 2765 iocb->private, io_end->inode->i_ino, iocb, offset,
2781 size); 2766 size);
2782 2767
2768 iocb->private = NULL;
2769
2783 /* if not aio dio with unwritten extents, just free io and return */ 2770 /* if not aio dio with unwritten extents, just free io and return */
2784 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { 2771 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
2785 ext4_free_io_end(io_end); 2772 ext4_free_io_end(io_end);
2786 iocb->private = NULL;
2787out: 2773out:
2788 if (is_async) 2774 if (is_async)
2789 aio_complete(iocb, ret, 0); 2775 aio_complete(iocb, ret, 0);
@@ -2807,7 +2793,6 @@ out:
2807 2793
2808 /* queue the work to convert unwritten extents to written */ 2794 /* queue the work to convert unwritten extents to written */
2809 queue_work(wq, &io_end->work); 2795 queue_work(wq, &io_end->work);
2810 iocb->private = NULL;
2811 2796
2812 /* XXX: probably should move into the real I/O completion handler */ 2797 /* XXX: probably should move into the real I/O completion handler */
2813 inode_dio_done(inode); 2798 inode_dio_done(inode);
@@ -3202,26 +3187,8 @@ int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
3202 3187
3203 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 3188 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3204 3189
3205 if (!page_has_buffers(page)) { 3190 if (!page_has_buffers(page))
3206 /* 3191 create_empty_buffers(page, blocksize, 0);
3207 * If the range to be discarded covers a partial block
3208 * we need to get the page buffers. This is because
3209 * partial blocks cannot be released and the page needs
3210 * to be updated with the contents of the block before
3211 * we write the zeros on top of it.
3212 */
3213 if ((from & (blocksize - 1)) ||
3214 ((from + length) & (blocksize - 1))) {
3215 create_empty_buffers(page, blocksize, 0);
3216 } else {
3217 /*
3218 * If there are no partial blocks,
3219 * there is nothing to update,
3220 * so we can return now
3221 */
3222 return 0;
3223 }
3224 }
3225 3192
3226 /* Find the buffer that contains "offset" */ 3193 /* Find the buffer that contains "offset" */
3227 bh = page_buffers(page); 3194 bh = page_buffers(page);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 7ce1d0b19c94..7e106c810c62 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -385,6 +385,18 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
385 385
386 block_end = block_start + blocksize; 386 block_end = block_start + blocksize;
387 if (block_start >= len) { 387 if (block_start >= len) {
388 /*
389 * Comments copied from block_write_full_page_endio:
390 *
391 * The page straddles i_size. It must be zeroed out on
392 * each and every writepage invocation because it may
393 * be mmapped. "A file is mapped in multiples of the
394 * page size. For a file that is not a multiple of
395 * the page size, the remaining memory is zeroed when
396 * mapped, and writes to that region are not written
397 * out to the file."
398 */
399 zero_user_segment(page, block_start, block_end);
388 clear_buffer_dirty(bh); 400 clear_buffer_dirty(bh);
389 set_buffer_uptodate(bh); 401 set_buffer_uptodate(bh);
390 continue; 402 continue;
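
The comment added above explains why the tail of a page that straddles i_size must be zeroed on every writeback: the page may be mmapped, and bytes past EOF must read back as zero. A tiny userspace sketch of that tail-zeroing rule, with an arbitrary page size and file length:

/*
 * Zero everything in a page that lies beyond the end of the file.
 * PAGE_SIZE and the file length are example values for this sketch.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static void zero_page_tail(unsigned char *page, long file_size, long page_index)
{
	long page_start = page_index * PAGE_SIZE;
	long in_page = file_size - page_start;	/* valid bytes in this page */

	if (in_page < 0)
		in_page = 0;
	if (in_page < PAGE_SIZE)
		memset(page + in_page, 0, PAGE_SIZE - in_page);
}

int main(void)
{
	unsigned char page[PAGE_SIZE];
	long file_size = PAGE_SIZE + 100;	/* second page holds 100 valid bytes */

	memset(page, 0xaa, sizeof(page));	/* pretend the rest is stale data */
	zero_page_tail(page, file_size, 1);
	printf("byte 99 = 0x%02x, byte 100 = 0x%02x\n", page[99], page[100]);
	return 0;
}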
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9953d80145ad..3e1329e2f826 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1155,9 +1155,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
1155 seq_puts(seq, ",block_validity"); 1155 seq_puts(seq, ",block_validity");
1156 1156
1157 if (!test_opt(sb, INIT_INODE_TABLE)) 1157 if (!test_opt(sb, INIT_INODE_TABLE))
1158 seq_puts(seq, ",noinit_inode_table"); 1158 seq_puts(seq, ",noinit_itable");
1159 else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT) 1159 else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
1160 seq_printf(seq, ",init_inode_table=%u", 1160 seq_printf(seq, ",init_itable=%u",
1161 (unsigned) sbi->s_li_wait_mult); 1161 (unsigned) sbi->s_li_wait_mult);
1162 1162
1163 ext4_show_quota_options(seq, sb); 1163 ext4_show_quota_options(seq, sb);
@@ -1333,8 +1333,7 @@ enum {
1333 Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, 1333 Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
1334 Opt_inode_readahead_blks, Opt_journal_ioprio, 1334 Opt_inode_readahead_blks, Opt_journal_ioprio,
1335 Opt_dioread_nolock, Opt_dioread_lock, 1335 Opt_dioread_nolock, Opt_dioread_lock,
1336 Opt_discard, Opt_nodiscard, 1336 Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
1337 Opt_init_inode_table, Opt_noinit_inode_table,
1338}; 1337};
1339 1338
1340static const match_table_t tokens = { 1339static const match_table_t tokens = {
@@ -1407,9 +1406,9 @@ static const match_table_t tokens = {
1407 {Opt_dioread_lock, "dioread_lock"}, 1406 {Opt_dioread_lock, "dioread_lock"},
1408 {Opt_discard, "discard"}, 1407 {Opt_discard, "discard"},
1409 {Opt_nodiscard, "nodiscard"}, 1408 {Opt_nodiscard, "nodiscard"},
1410 {Opt_init_inode_table, "init_itable=%u"}, 1409 {Opt_init_itable, "init_itable=%u"},
1411 {Opt_init_inode_table, "init_itable"}, 1410 {Opt_init_itable, "init_itable"},
1412 {Opt_noinit_inode_table, "noinit_itable"}, 1411 {Opt_noinit_itable, "noinit_itable"},
1413 {Opt_err, NULL}, 1412 {Opt_err, NULL},
1414}; 1413};
1415 1414
@@ -1683,7 +1682,9 @@ static int parse_options(char *options, struct super_block *sb,
1683 data_opt = EXT4_MOUNT_WRITEBACK_DATA; 1682 data_opt = EXT4_MOUNT_WRITEBACK_DATA;
1684 datacheck: 1683 datacheck:
1685 if (is_remount) { 1684 if (is_remount) {
1686 if (test_opt(sb, DATA_FLAGS) != data_opt) { 1685 if (!sbi->s_journal)
1686 ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
1687 else if (test_opt(sb, DATA_FLAGS) != data_opt) {
1687 ext4_msg(sb, KERN_ERR, 1688 ext4_msg(sb, KERN_ERR,
1688 "Cannot change data mode on remount"); 1689 "Cannot change data mode on remount");
1689 return 0; 1690 return 0;
@@ -1890,7 +1891,7 @@ set_qf_format:
1890 case Opt_dioread_lock: 1891 case Opt_dioread_lock:
1891 clear_opt(sb, DIOREAD_NOLOCK); 1892 clear_opt(sb, DIOREAD_NOLOCK);
1892 break; 1893 break;
1893 case Opt_init_inode_table: 1894 case Opt_init_itable:
1894 set_opt(sb, INIT_INODE_TABLE); 1895 set_opt(sb, INIT_INODE_TABLE);
1895 if (args[0].from) { 1896 if (args[0].from) {
1896 if (match_int(&args[0], &option)) 1897 if (match_int(&args[0], &option))
@@ -1901,7 +1902,7 @@ set_qf_format:
1901 return 0; 1902 return 0;
1902 sbi->s_li_wait_mult = option; 1903 sbi->s_li_wait_mult = option;
1903 break; 1904 break;
1904 case Opt_noinit_inode_table: 1905 case Opt_noinit_itable:
1905 clear_opt(sb, INIT_INODE_TABLE); 1906 clear_opt(sb, INIT_INODE_TABLE);
1906 break; 1907 break;
1907 default: 1908 default:
@@ -3099,8 +3100,6 @@ static void ext4_destroy_lazyinit_thread(void)
3099} 3100}
3100 3101
3101static int ext4_fill_super(struct super_block *sb, void *data, int silent) 3102static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3102 __releases(kernel_lock)
3103 __acquires(kernel_lock)
3104{ 3103{
3105 char *orig_data = kstrdup(data, GFP_KERNEL); 3104 char *orig_data = kstrdup(data, GFP_KERNEL);
3106 struct buffer_head *bh; 3105 struct buffer_head *bh;
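
Part of the ext4/super.c change above simply renames the option tokens to match the user-visible strings (init_itable/noinit_itable), where a token may carry an optional =<n> argument. The sketch below shows the general shape of that kind of comma-separated option parsing in plain userspace C; it loosely mimics parse_options() and does not use the kernel's match_table_t machinery.

/*
 * Minimal option-string parser: accepts "noinit_itable", bare
 * "init_itable", or "init_itable=<n>". Option names reuse the ones in
 * the hunk above; the defaults are arbitrary.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char options[] = "noinit_itable,init_itable=20,discard";
	int init_itable = 1, wait_mult = 10, discard = 0;
	char *opt, *save = NULL;

	for (opt = strtok_r(options, ",", &save); opt;
	     opt = strtok_r(NULL, ",", &save)) {
		if (!strcmp(opt, "noinit_itable")) {
			init_itable = 0;
		} else if (!strncmp(opt, "init_itable", 11)) {
			init_itable = 1;
			if (opt[11] == '=')
				wait_mult = atoi(opt + 12);
		} else if (!strcmp(opt, "discard")) {
			discard = 1;
		} else {
			fprintf(stderr, "unknown option: %s\n", opt);
		}
	}
	printf("init_itable=%d wait_mult=%d discard=%d\n",
	       init_itable, wait_mult, discard);
	return 0;
}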
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 73c3992b2bb4..ac86f8b3e3cb 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -156,6 +156,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
156 * bdi_start_writeback - start writeback 156 * bdi_start_writeback - start writeback
157 * @bdi: the backing device to write from 157 * @bdi: the backing device to write from
158 * @nr_pages: the number of pages to write 158 * @nr_pages: the number of pages to write
159 * @reason: reason why some writeback work was initiated
159 * 160 *
160 * Description: 161 * Description:
161 * This does WB_SYNC_NONE opportunistic writeback. The IO is only 162 * This does WB_SYNC_NONE opportunistic writeback. The IO is only
@@ -1223,6 +1224,7 @@ static void wait_sb_inodes(struct super_block *sb)
1223 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block 1224 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
1224 * @sb: the superblock 1225 * @sb: the superblock
1225 * @nr: the number of pages to write 1226 * @nr: the number of pages to write
1227 * @reason: reason why some writeback work initiated
1226 * 1228 *
1227 * Start writeback on some inodes on this super_block. No guarantees are made 1229 * Start writeback on some inodes on this super_block. No guarantees are made
1228 * on how many (if any) will be written, and this function does not wait 1230 * on how many (if any) will be written, and this function does not wait
@@ -1251,6 +1253,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
1251/** 1253/**
1252 * writeback_inodes_sb - writeback dirty inodes from given super_block 1254 * writeback_inodes_sb - writeback dirty inodes from given super_block
1253 * @sb: the superblock 1255 * @sb: the superblock
1256 * @reason: reason why some writeback work was initiated
1254 * 1257 *
1255 * Start writeback on some inodes on this super_block. No guarantees are made 1258 * Start writeback on some inodes on this super_block. No guarantees are made
1256 * on how many (if any) will be written, and this function does not wait 1259 * on how many (if any) will be written, and this function does not wait
@@ -1265,6 +1268,7 @@ EXPORT_SYMBOL(writeback_inodes_sb);
1265/** 1268/**
1266 * writeback_inodes_sb_if_idle - start writeback if none underway 1269 * writeback_inodes_sb_if_idle - start writeback if none underway
1267 * @sb: the superblock 1270 * @sb: the superblock
1271 * @reason: reason why some writeback work was initiated
1268 * 1272 *
1269 * Invoke writeback_inodes_sb if no writeback is currently underway. 1273 * Invoke writeback_inodes_sb if no writeback is currently underway.
1270 * Returns 1 if writeback was started, 0 if not. 1274 * Returns 1 if writeback was started, 0 if not.
@@ -1285,6 +1289,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
1285 * writeback_inodes_sb_if_idle - start writeback if none underway 1289 * writeback_inodes_sb_if_idle - start writeback if none underway
1286 * @sb: the superblock 1290 * @sb: the superblock
1287 * @nr: the number of pages to write 1291 * @nr: the number of pages to write
1292 * @reason: reason why some writeback work was initiated
1288 * 1293 *
1289 * Invoke writeback_inodes_sb if no writeback is currently underway. 1294 * Invoke writeback_inodes_sb if no writeback is currently underway.
1290 * Returns 1 if writeback was started, 0 if not. 1295 * Returns 1 if writeback was started, 0 if not.
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 5cb8614508c3..2aaf3eaaf13d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1512,7 +1512,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1512 else if (outarg->offset + num > file_size) 1512 else if (outarg->offset + num > file_size)
1513 num = file_size - outarg->offset; 1513 num = file_size - outarg->offset;
1514 1514
1515 while (num) { 1515 while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
1516 struct page *page; 1516 struct page *page;
1517 unsigned int this_num; 1517 unsigned int this_num;
1518 1518
@@ -1526,6 +1526,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1526 1526
1527 num -= this_num; 1527 num -= this_num;
1528 total_len += this_num; 1528 total_len += this_num;
1529 index++;
1529 } 1530 }
1530 req->misc.retrieve_in.offset = outarg->offset; 1531 req->misc.retrieve_in.offset = outarg->offset;
1531 req->misc.retrieve_in.size = total_len; 1532 req->misc.retrieve_in.size = total_len;
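
The fuse_retrieve() hunks above add two loop fixes: cap how many pages go into one request and advance the page index each iteration (without index++, the same page was fetched repeatedly). Below is a generic sketch of a bounded chunk loop with both fixes applied; the chunk size and the limit are example values, not FUSE constants.

/*
 * Bounded chunk loop: stop at MAX_CHUNKS per "request" and advance the
 * index every pass. All numbers here are invented.
 */
#include <stdio.h>

#define CHUNK_SIZE	4096
#define MAX_CHUNKS	32

int main(void)
{
	unsigned long remaining = 10 * CHUNK_SIZE + 123;
	unsigned long index = 5;	/* first chunk to fetch */
	unsigned chunks = 0;

	while (remaining && chunks < MAX_CHUNKS) {
		unsigned long this_chunk =
			remaining < CHUNK_SIZE ? remaining : CHUNK_SIZE;

		printf("fetch chunk %lu (%lu bytes)\n", index, this_chunk);
		remaining -= this_chunk;
		chunks++;
		index++;	/* without this, the same chunk repeats forever */
	}
	return 0;
}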
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 594f07a81c28..0c84100acd44 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1556,7 +1556,7 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
 	struct inode *inode = file->f_path.dentry->d_inode;
 
 	mutex_lock(&inode->i_mutex);
-	if (origin != SEEK_CUR || origin != SEEK_SET) {
+	if (origin != SEEK_CUR && origin != SEEK_SET) {
 		retval = fuse_update_attributes(inode, NULL, file, NULL);
 		if (retval)
 			goto exit;
@@ -1567,6 +1567,10 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
 		offset += i_size_read(inode);
 		break;
 	case SEEK_CUR:
+		if (offset == 0) {
+			retval = file->f_pos;
+			goto exit;
+		}
 		offset += file->f_pos;
 		break;
 	case SEEK_DATA:
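
The first fuse_file_llseek() hunk above fixes a classic operator slip: with ||, the test "origin is neither SEEK_CUR nor SEEK_SET" is true for every origin, so the attribute refresh always ran. The standalone check below just demonstrates the truth-table difference; it contains no FUSE code.

/*
 * Compare the buggy and fixed conditions for the three standard seek
 * origins. SEEK_* come straight from <stdio.h>.
 */
#include <stdio.h>

int main(void)
{
	int origins[] = { SEEK_SET, SEEK_CUR, SEEK_END };
	int i;

	for (i = 0; i < 3; i++) {
		int o = origins[i];
		int buggy = (o != SEEK_CUR || o != SEEK_SET);	/* always 1 */
		int fixed = (o != SEEK_CUR && o != SEEK_SET);	/* 1 only for SEEK_END */

		printf("origin=%d buggy=%d fixed=%d\n", o, buggy, fixed);
	}
	return 0;
}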
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 3e6d72756479..aa83109b9431 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1138,28 +1138,28 @@ static int __init fuse_fs_init(void)
1138{ 1138{
1139 int err; 1139 int err;
1140 1140
1141 err = register_filesystem(&fuse_fs_type);
1142 if (err)
1143 goto out;
1144
1145 err = register_fuseblk();
1146 if (err)
1147 goto out_unreg;
1148
1149 fuse_inode_cachep = kmem_cache_create("fuse_inode", 1141 fuse_inode_cachep = kmem_cache_create("fuse_inode",
1150 sizeof(struct fuse_inode), 1142 sizeof(struct fuse_inode),
1151 0, SLAB_HWCACHE_ALIGN, 1143 0, SLAB_HWCACHE_ALIGN,
1152 fuse_inode_init_once); 1144 fuse_inode_init_once);
1153 err = -ENOMEM; 1145 err = -ENOMEM;
1154 if (!fuse_inode_cachep) 1146 if (!fuse_inode_cachep)
1155 goto out_unreg2; 1147 goto out;
1148
1149 err = register_fuseblk();
1150 if (err)
1151 goto out2;
1152
1153 err = register_filesystem(&fuse_fs_type);
1154 if (err)
1155 goto out3;
1156 1156
1157 return 0; 1157 return 0;
1158 1158
1159 out_unreg2: 1159 out3:
1160 unregister_fuseblk(); 1160 unregister_fuseblk();
1161 out_unreg: 1161 out2:
1162 unregister_filesystem(&fuse_fs_type); 1162 kmem_cache_destroy(fuse_inode_cachep);
1163 out: 1163 out:
1164 return err; 1164 return err;
1165} 1165}
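
The fuse_fs_init() rework above applies a general module-init rule: create private resources first, make the filesystem externally visible last, and unwind in exact reverse order on failure. The sketch below shows that goto-based unwind shape with invented setup_*/teardown_* placeholders; it is not the fuse code itself.

/*
 * Init/unwind ordering sketch: the externally visible step comes last,
 * and each failure label undoes only what was already set up, in
 * reverse order.
 */
#include <stdio.h>

static int setup_cache(void)       { printf("cache up\n");   return 0; }
static void teardown_cache(void)   { printf("cache down\n"); }
static int setup_backend(void)     { printf("backend up\n"); return 0; }
static void teardown_backend(void) { printf("backend down\n"); }
static int publish_interface(void) { printf("publish failed\n"); return -1; }

static int module_init_sketch(void)
{
	int err;

	err = setup_cache();		/* private resources first */
	if (err)
		goto out;
	err = setup_backend();
	if (err)
		goto out_cache;
	err = publish_interface();	/* visible to users only when ready */
	if (err)
		goto out_backend;
	return 0;

out_backend:
	teardown_backend();
out_cache:
	teardown_cache();
out:
	return err;
}

int main(void)
{
	printf("init returned %d\n", module_init_sketch());
	return 0;
}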
diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c
index e673a88b8ae7..b1ce4c7ad3fb 100644
--- a/fs/hfs/trans.c
+++ b/fs/hfs/trans.c
@@ -40,6 +40,8 @@ int hfs_mac2asc(struct super_block *sb, char *out, const struct hfs_name *in)
40 40
41 src = in->name; 41 src = in->name;
42 srclen = in->len; 42 srclen = in->len;
43 if (srclen > HFS_NAMELEN)
44 srclen = HFS_NAMELEN;
43 dst = out; 45 dst = out;
44 dstlen = HFS_MAX_NAMELEN; 46 dstlen = HFS_MAX_NAMELEN;
45 if (nls_io) { 47 if (nls_io) {
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 3f32bcb0d9bd..ef175cb8cfd8 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -16,38 +16,26 @@
16#include <linux/bitops.h> 16#include <linux/bitops.h>
17#include <linux/sched.h> 17#include <linux/sched.h>
18 18
19static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 };
20
21static DEFINE_SPINLOCK(bitmap_lock); 19static DEFINE_SPINLOCK(bitmap_lock);
22 20
23static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits) 21/*
22 * bitmap consists of blocks filled with 16bit words
23 * bit set == busy, bit clear == free
24 * endianness is a mess, but for counting zero bits it really doesn't matter...
25 */
26static __u32 count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
24{ 27{
25 unsigned i, j, sum = 0; 28 __u32 sum = 0;
26 struct buffer_head *bh; 29 unsigned blocks = DIV_ROUND_UP(numbits, blocksize * 8);
27
28 for (i=0; i<numblocks-1; i++) {
29 if (!(bh=map[i]))
30 return(0);
31 for (j=0; j<bh->b_size; j++)
32 sum += nibblemap[bh->b_data[j] & 0xf]
33 + nibblemap[(bh->b_data[j]>>4) & 0xf];
34 }
35 30
36 if (numblocks==0 || !(bh=map[numblocks-1])) 31 while (blocks--) {
37 return(0); 32 unsigned words = blocksize / 2;
38 i = ((numbits - (numblocks-1) * bh->b_size * 8) / 16) * 2; 33 __u16 *p = (__u16 *)(*map++)->b_data;
39 for (j=0; j<i; j++) { 34 while (words--)
40 sum += nibblemap[bh->b_data[j] & 0xf] 35 sum += 16 - hweight16(*p++);
41 + nibblemap[(bh->b_data[j]>>4) & 0xf];
42 } 36 }
43 37
44 i = numbits%16; 38 return sum;
45 if (i!=0) {
46 i = *(__u16 *)(&bh->b_data[j]) | ~((1<<i) - 1);
47 sum += nibblemap[i & 0xf] + nibblemap[(i>>4) & 0xf];
48 sum += nibblemap[(i>>8) & 0xf] + nibblemap[(i>>12) & 0xf];
49 }
50 return(sum);
51} 39}
52 40
53void minix_free_block(struct inode *inode, unsigned long block) 41void minix_free_block(struct inode *inode, unsigned long block)
@@ -105,10 +93,12 @@ int minix_new_block(struct inode * inode)
105 return 0; 93 return 0;
106} 94}
107 95
108unsigned long minix_count_free_blocks(struct minix_sb_info *sbi) 96unsigned long minix_count_free_blocks(struct super_block *sb)
109{ 97{
110 return (count_free(sbi->s_zmap, sbi->s_zmap_blocks, 98 struct minix_sb_info *sbi = minix_sb(sb);
111 sbi->s_nzones - sbi->s_firstdatazone + 1) 99 u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1);
100
101 return (count_free(sbi->s_zmap, sb->s_blocksize, bits)
112 << sbi->s_log_zone_size); 102 << sbi->s_log_zone_size);
113} 103}
114 104
@@ -273,7 +263,10 @@ struct inode *minix_new_inode(const struct inode *dir, int mode, int *error)
273 return inode; 263 return inode;
274} 264}
275 265
276unsigned long minix_count_free_inodes(struct minix_sb_info *sbi) 266unsigned long minix_count_free_inodes(struct super_block *sb)
277{ 267{
278 return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1); 268 struct minix_sb_info *sbi = minix_sb(sb);
269 u32 bits = sbi->s_ninodes + 1;
270
271 return count_free(sbi->s_imap, sb->s_blocksize, bits);
279} 272}
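
The rewritten minix count_free() above walks the bitmap as 16-bit words and counts free bits as 16 - hweight16(word), replacing the old nibble lookup table. A userspace sketch of the same counting idea, with hweight16() emulated via a GCC/Clang builtin and a made-up sample bitmap:

/*
 * Count clear ("free") bits in an array of 16-bit bitmap words.
 * __builtin_popcount() stands in for the kernel's hweight16().
 */
#include <stdint.h>
#include <stdio.h>

static unsigned hweight16(uint16_t w)
{
	return (unsigned)__builtin_popcount(w);
}

static uint32_t count_free(const uint16_t *map, unsigned nwords)
{
	uint32_t sum = 0;

	while (nwords--)
		sum += 16 - hweight16(*map++);
	return sum;
}

int main(void)
{
	uint16_t bitmap[] = { 0xffff, 0x00ff, 0x0001, 0x0000 };

	/* 0 + 8 + 15 + 16 = 39 free bits */
	printf("free bits: %u\n", count_free(bitmap, 4));
	return 0;
}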
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 64cdcd662ffc..1d9e33966db0 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -279,6 +279,27 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
279 else if (sbi->s_mount_state & MINIX_ERROR_FS) 279 else if (sbi->s_mount_state & MINIX_ERROR_FS)
280 printk("MINIX-fs: mounting file system with errors, " 280 printk("MINIX-fs: mounting file system with errors, "
281 "running fsck is recommended\n"); 281 "running fsck is recommended\n");
282
283 /* Apparently minix can create filesystems that allocate more blocks for
284 * the bitmaps than needed. We simply ignore that, but verify it didn't
285 * create one with not enough blocks and bail out if so.
286 */
287 block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
288 if (sbi->s_imap_blocks < block) {
289 printk("MINIX-fs: file system does not have enough "
290 "imap blocks allocated. Refusing to mount\n");
291 goto out_iput;
292 }
293
294 block = minix_blocks_needed(
295 (sbi->s_nzones - (sbi->s_firstdatazone + 1)),
296 s->s_blocksize);
297 if (sbi->s_zmap_blocks < block) {
298 printk("MINIX-fs: file system does not have enough "
299 "zmap blocks allocated. Refusing to mount.\n");
300 goto out_iput;
301 }
302
282 return 0; 303 return 0;
283 304
284out_iput: 305out_iput:
@@ -339,10 +360,10 @@ static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
339 buf->f_type = sb->s_magic; 360 buf->f_type = sb->s_magic;
340 buf->f_bsize = sb->s_blocksize; 361 buf->f_bsize = sb->s_blocksize;
341 buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size; 362 buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
342 buf->f_bfree = minix_count_free_blocks(sbi); 363 buf->f_bfree = minix_count_free_blocks(sb);
343 buf->f_bavail = buf->f_bfree; 364 buf->f_bavail = buf->f_bfree;
344 buf->f_files = sbi->s_ninodes; 365 buf->f_files = sbi->s_ninodes;
345 buf->f_ffree = minix_count_free_inodes(sbi); 366 buf->f_ffree = minix_count_free_inodes(sb);
346 buf->f_namelen = sbi->s_namelen; 367 buf->f_namelen = sbi->s_namelen;
347 buf->f_fsid.val[0] = (u32)id; 368 buf->f_fsid.val[0] = (u32)id;
348 buf->f_fsid.val[1] = (u32)(id >> 32); 369 buf->f_fsid.val[1] = (u32)(id >> 32);
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index 341e2122879a..26bbd55e82ea 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -48,10 +48,10 @@ extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, stru
48extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); 48extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
49extern struct inode * minix_new_inode(const struct inode *, int, int *); 49extern struct inode * minix_new_inode(const struct inode *, int, int *);
50extern void minix_free_inode(struct inode * inode); 50extern void minix_free_inode(struct inode * inode);
51extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi); 51extern unsigned long minix_count_free_inodes(struct super_block *sb);
52extern int minix_new_block(struct inode * inode); 52extern int minix_new_block(struct inode * inode);
53extern void minix_free_block(struct inode *inode, unsigned long block); 53extern void minix_free_block(struct inode *inode, unsigned long block);
54extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi); 54extern unsigned long minix_count_free_blocks(struct super_block *sb);
55extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *); 55extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *);
56extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len); 56extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
57 57
@@ -88,6 +88,11 @@ static inline struct minix_inode_info *minix_i(struct inode *inode)
88 return list_entry(inode, struct minix_inode_info, vfs_inode); 88 return list_entry(inode, struct minix_inode_info, vfs_inode);
89} 89}
90 90
91static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize)
92{
93 return DIV_ROUND_UP(bits, blocksize * 8);
94}
95
91#if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \ 96#if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \
92 defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED) 97 defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
93 98
@@ -125,7 +130,7 @@ static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
125 if (!size) 130 if (!size)
126 return 0; 131 return 0;
127 132
128 size = (size >> 4) + ((size & 15) > 0); 133 size >>= 4;
129 while (*p++ == 0xffff) { 134 while (*p++ == 0xffff) {
130 if (--size == 0) 135 if (--size == 0)
131 return (p - addr) << 4; 136 return (p - addr) << 4;
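
minix_blocks_needed() above is just DIV_ROUND_UP(bits, bits-per-block), and the new mount-time checks refuse superblocks that advertise fewer imap/zmap blocks than that. A small standalone sketch of the computation and the check, with invented example numbers:

/*
 * How many bitmap blocks are required to cover N bits, and a refusal
 * when the superblock claims fewer. DIV_ROUND_UP is re-defined here for
 * a standalone build.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned blocks_needed(unsigned bits, unsigned blocksize)
{
	return DIV_ROUND_UP(bits, blocksize * 8);
}

int main(void)
{
	unsigned blocksize = 1024;		/* bytes per bitmap block */
	unsigned ninodes = 10000;		/* bits that must be covered */
	unsigned imap_blocks_on_disk = 1;	/* what the superblock claims */
	unsigned needed = blocks_needed(ninodes, blocksize);

	printf("need %u imap block(s)\n", needed);
	if (imap_blocks_on_disk < needed)
		printf("refusing to mount: only %u allocated\n",
		       imap_blocks_on_disk);
	return 0;
}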
diff --git a/fs/namespace.c b/fs/namespace.c
index e5e1c7d1839b..cfc6d4448aa5 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1048,15 +1048,12 @@ static int show_mountinfo(struct seq_file *m, void *v)
1048 if (err) 1048 if (err)
1049 goto out; 1049 goto out;
1050 seq_putc(m, ' '); 1050 seq_putc(m, ' ');
1051 seq_path_root(m, &mnt_path, &root, " \t\n\\"); 1051
1052 if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) { 1052 /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
1053 /* 1053 err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
1054 * Mountpoint is outside root, discard that one. Ugly, 1054 if (err)
1055 * but less so than trying to do that in iterator in a 1055 goto out;
1056 * race-free way (due to renames). 1056
1057 */
1058 return SEQ_SKIP;
1059 }
1060 seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw"); 1057 seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
1061 show_mnt_opts(m, mnt); 1058 show_mnt_opts(m, mnt);
1062 1059
@@ -2483,11 +2480,43 @@ struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
2483 __mnt_make_longterm(mnt); 2480 __mnt_make_longterm(mnt);
2484 new_ns->root = mnt; 2481 new_ns->root = mnt;
2485 list_add(&new_ns->list, &new_ns->root->mnt_list); 2482 list_add(&new_ns->list, &new_ns->root->mnt_list);
2483 } else {
2484 mntput(mnt);
2486 } 2485 }
2487 return new_ns; 2486 return new_ns;
2488} 2487}
2489EXPORT_SYMBOL(create_mnt_ns); 2488EXPORT_SYMBOL(create_mnt_ns);
2490 2489
2490struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
2491{
2492 struct mnt_namespace *ns;
2493 struct super_block *s;
2494 struct path path;
2495 int err;
2496
2497 ns = create_mnt_ns(mnt);
2498 if (IS_ERR(ns))
2499 return ERR_CAST(ns);
2500
2501 err = vfs_path_lookup(mnt->mnt_root, mnt,
2502 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
2503
2504 put_mnt_ns(ns);
2505
2506 if (err)
2507 return ERR_PTR(err);
2508
2509 /* trade a vfsmount reference for active sb one */
2510 s = path.mnt->mnt_sb;
2511 atomic_inc(&s->s_active);
2512 mntput(path.mnt);
2513 /* lock the sucker */
2514 down_write(&s->s_umount);
2515 /* ... and return the root of (sub)tree on it */
2516 return path.dentry;
2517}
2518EXPORT_SYMBOL(mount_subtree);
2519
2491SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, 2520SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
2492 char __user *, type, unsigned long, flags, void __user *, data) 2521 char __user *, type, unsigned long, flags, void __user *, data)
2493{ 2522{
@@ -2744,3 +2773,8 @@ void kern_unmount(struct vfsmount *mnt)
2744 } 2773 }
2745} 2774}
2746EXPORT_SYMBOL(kern_unmount); 2775EXPORT_SYMBOL(kern_unmount);
2776
2777bool our_mnt(struct vfsmount *mnt)
2778{
2779 return check_mnt(mnt);
2780}
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 5b5fa33b6b9d..cbd1a61c110a 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -548,7 +548,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
548 548
549 error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY); 549 error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
550 if (error) 550 if (error)
551 goto out_bdi; 551 goto out_fput;
552 552
553 server->ncp_filp = ncp_filp; 553 server->ncp_filp = ncp_filp;
554 server->ncp_sock = sock; 554 server->ncp_sock = sock;
@@ -559,7 +559,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
559 error = -EBADF; 559 error = -EBADF;
560 server->info_filp = fget(data.info_fd); 560 server->info_filp = fget(data.info_fd);
561 if (!server->info_filp) 561 if (!server->info_filp)
562 goto out_fput; 562 goto out_bdi;
563 error = -ENOTSOCK; 563 error = -ENOTSOCK;
564 sock_inode = server->info_filp->f_path.dentry->d_inode; 564 sock_inode = server->info_filp->f_path.dentry->d_inode;
565 if (!S_ISSOCK(sock_inode->i_mode)) 565 if (!S_ISSOCK(sock_inode->i_mode))
@@ -746,9 +746,9 @@ out_nls:
746out_fput2: 746out_fput2:
747 if (server->info_filp) 747 if (server->info_filp)
748 fput(server->info_filp); 748 fput(server->info_filp);
749out_fput:
750 bdi_destroy(&server->bdi);
751out_bdi: 749out_bdi:
750 bdi_destroy(&server->bdi);
751out_fput:
752 /* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>: 752 /* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
753 * 753 *
754 * The previously used put_filp(ncp_filp); was bogus, since 754 * The previously used put_filp(ncp_filp); was bogus, since
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index b238d95ac48c..ac2899098147 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1468,12 +1468,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
1468 res = NULL; 1468 res = NULL;
1469 goto out; 1469 goto out;
1470 /* This turned out not to be a regular file */ 1470 /* This turned out not to be a regular file */
1471 case -EISDIR:
1471 case -ENOTDIR: 1472 case -ENOTDIR:
1472 goto no_open; 1473 goto no_open;
1473 case -ELOOP: 1474 case -ELOOP:
1474 if (!(nd->intent.open.flags & O_NOFOLLOW)) 1475 if (!(nd->intent.open.flags & O_NOFOLLOW))
1475 goto no_open; 1476 goto no_open;
1476 /* case -EISDIR: */
1477 /* case -EINVAL: */ 1477 /* case -EINVAL: */
1478 default: 1478 default:
1479 res = ERR_CAST(inode); 1479 res = ERR_CAST(inode);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 0a1f8312b4dc..eca56d4b39c0 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -40,48 +40,8 @@
40 40
41#define NFSDBG_FACILITY NFSDBG_FILE 41#define NFSDBG_FACILITY NFSDBG_FILE
42 42
43static int nfs_file_open(struct inode *, struct file *);
44static int nfs_file_release(struct inode *, struct file *);
45static loff_t nfs_file_llseek(struct file *file, loff_t offset, int origin);
46static int nfs_file_mmap(struct file *, struct vm_area_struct *);
47static ssize_t nfs_file_splice_read(struct file *filp, loff_t *ppos,
48 struct pipe_inode_info *pipe,
49 size_t count, unsigned int flags);
50static ssize_t nfs_file_read(struct kiocb *, const struct iovec *iov,
51 unsigned long nr_segs, loff_t pos);
52static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
53 struct file *filp, loff_t *ppos,
54 size_t count, unsigned int flags);
55static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov,
56 unsigned long nr_segs, loff_t pos);
57static int nfs_file_flush(struct file *, fl_owner_t id);
58static int nfs_file_fsync(struct file *, loff_t, loff_t, int datasync);
59static int nfs_check_flags(int flags);
60static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
61static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
62static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
63
64static const struct vm_operations_struct nfs_file_vm_ops; 43static const struct vm_operations_struct nfs_file_vm_ops;
65 44
66const struct file_operations nfs_file_operations = {
67 .llseek = nfs_file_llseek,
68 .read = do_sync_read,
69 .write = do_sync_write,
70 .aio_read = nfs_file_read,
71 .aio_write = nfs_file_write,
72 .mmap = nfs_file_mmap,
73 .open = nfs_file_open,
74 .flush = nfs_file_flush,
75 .release = nfs_file_release,
76 .fsync = nfs_file_fsync,
77 .lock = nfs_lock,
78 .flock = nfs_flock,
79 .splice_read = nfs_file_splice_read,
80 .splice_write = nfs_file_splice_write,
81 .check_flags = nfs_check_flags,
82 .setlease = nfs_setlease,
83};
84
85const struct inode_operations nfs_file_inode_operations = { 45const struct inode_operations nfs_file_inode_operations = {
86 .permission = nfs_permission, 46 .permission = nfs_permission,
87 .getattr = nfs_getattr, 47 .getattr = nfs_getattr,
@@ -886,3 +846,54 @@ static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
886 file->f_path.dentry->d_name.name, arg); 846 file->f_path.dentry->d_name.name, arg);
887 return -EINVAL; 847 return -EINVAL;
888} 848}
849
850const struct file_operations nfs_file_operations = {
851 .llseek = nfs_file_llseek,
852 .read = do_sync_read,
853 .write = do_sync_write,
854 .aio_read = nfs_file_read,
855 .aio_write = nfs_file_write,
856 .mmap = nfs_file_mmap,
857 .open = nfs_file_open,
858 .flush = nfs_file_flush,
859 .release = nfs_file_release,
860 .fsync = nfs_file_fsync,
861 .lock = nfs_lock,
862 .flock = nfs_flock,
863 .splice_read = nfs_file_splice_read,
864 .splice_write = nfs_file_splice_write,
865 .check_flags = nfs_check_flags,
866 .setlease = nfs_setlease,
867};
868
869#ifdef CONFIG_NFS_V4
870static int
871nfs4_file_open(struct inode *inode, struct file *filp)
872{
873 /*
874 * NFSv4 opens are handled in d_lookup and d_revalidate. If we get to
875 * this point, then something is very wrong
876 */
877 dprintk("NFS: %s called! inode=%p filp=%p\n", __func__, inode, filp);
878 return -ENOTDIR;
879}
880
881const struct file_operations nfs4_file_operations = {
882 .llseek = nfs_file_llseek,
883 .read = do_sync_read,
884 .write = do_sync_write,
885 .aio_read = nfs_file_read,
886 .aio_write = nfs_file_write,
887 .mmap = nfs_file_mmap,
888 .open = nfs4_file_open,
889 .flush = nfs_file_flush,
890 .release = nfs_file_release,
891 .fsync = nfs_file_fsync,
892 .lock = nfs_lock,
893 .flock = nfs_flock,
894 .splice_read = nfs_file_splice_read,
895 .splice_write = nfs_file_splice_write,
896 .check_flags = nfs_check_flags,
897 .setlease = nfs_setlease,
898};
899#endif /* CONFIG_NFS_V4 */
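
The fs/nfs/file.c reshuffle above defines the file_operations tables after the functions they point to (so the block of forward declarations could go away) and gives NFSv4 its own table whose .open refuses to be reached. The sketch below shows that per-variant operations-table pattern in miniature; the struct and functions are invented and do not mirror the nfs_rpc_ops layout.

/*
 * Per-variant operations tables: one struct of function pointers per
 * protocol flavour, defined after the functions so no forward
 * declarations are needed.
 */
#include <stdio.h>

struct file_ops {
	const char *name;
	int (*open)(const char *path);
};

static int generic_open(const char *path)
{
	printf("generic open of %s\n", path);
	return 0;
}

static int v4_open(const char *path)
{
	/* v4 opens are handled elsewhere; reaching this is unexpected */
	printf("v4 open of %s refused\n", path);
	return -1;
}

/* tables come last, so the functions above are already in scope */
static const struct file_ops v3_ops = { .name = "v3", .open = generic_open };
static const struct file_ops v4_ops = { .name = "v4", .open = v4_open };

int main(void)
{
	const struct file_ops *ops[] = { &v3_ops, &v4_ops };
	int i;

	for (i = 0; i < 2; i++)
		printf("%s -> %d\n", ops[i]->name, ops[i]->open("/mnt/x"));
	return 0;
}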
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c07a55aec838..50a15fa8cf98 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -291,7 +291,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
291 */ 291 */
292 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops; 292 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
293 if (S_ISREG(inode->i_mode)) { 293 if (S_ISREG(inode->i_mode)) {
294 inode->i_fop = &nfs_file_operations; 294 inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
295 inode->i_data.a_ops = &nfs_file_aops; 295 inode->i_data.a_ops = &nfs_file_aops;
296 inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info; 296 inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info;
297 } else if (S_ISDIR(inode->i_mode)) { 297 } else if (S_ISDIR(inode->i_mode)) {
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index c1a1bd8ddf1c..3f4d95751d52 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -299,6 +299,8 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
299extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, 299extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
300 struct list_head *head); 300 struct list_head *head);
301 301
302extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
303 struct inode *inode);
302extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); 304extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
303extern void nfs_readdata_release(struct nfs_read_data *rdata); 305extern void nfs_readdata_release(struct nfs_read_data *rdata);
304 306
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 85f1690ca08c..d4bc9ed91748 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -853,6 +853,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
853 .dentry_ops = &nfs_dentry_operations, 853 .dentry_ops = &nfs_dentry_operations,
854 .dir_inode_ops = &nfs3_dir_inode_operations, 854 .dir_inode_ops = &nfs3_dir_inode_operations,
855 .file_inode_ops = &nfs3_file_inode_operations, 855 .file_inode_ops = &nfs3_file_inode_operations,
856 .file_ops = &nfs_file_operations,
856 .getroot = nfs3_proc_get_root, 857 .getroot = nfs3_proc_get_root,
857 .getattr = nfs3_proc_getattr, 858 .getattr = nfs3_proc_getattr,
858 .setattr = nfs3_proc_setattr, 859 .setattr = nfs3_proc_setattr,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b60fddf606f7..be2bbac13817 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2464,8 +2464,7 @@ static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qst
2464 case -NFS4ERR_BADNAME: 2464 case -NFS4ERR_BADNAME:
2465 return -ENOENT; 2465 return -ENOENT;
2466 case -NFS4ERR_MOVED: 2466 case -NFS4ERR_MOVED:
2467 err = nfs4_get_referral(dir, name, fattr, fhandle); 2467 return nfs4_get_referral(dir, name, fattr, fhandle);
2468 break;
2469 case -NFS4ERR_WRONGSEC: 2468 case -NFS4ERR_WRONGSEC:
2470 nfs_fixup_secinfo_attributes(fattr, fhandle); 2469 nfs_fixup_secinfo_attributes(fattr, fhandle);
2471 } 2470 }
@@ -6253,6 +6252,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
6253 .dentry_ops = &nfs4_dentry_operations, 6252 .dentry_ops = &nfs4_dentry_operations,
6254 .dir_inode_ops = &nfs4_dir_inode_operations, 6253 .dir_inode_ops = &nfs4_dir_inode_operations,
6255 .file_inode_ops = &nfs4_file_inode_operations, 6254 .file_inode_ops = &nfs4_file_inode_operations,
6255 .file_ops = &nfs4_file_operations,
6256 .getroot = nfs4_proc_get_root, 6256 .getroot = nfs4_proc_get_root,
6257 .getattr = nfs4_proc_getattr, 6257 .getattr = nfs4_proc_getattr,
6258 .setattr = nfs4_proc_setattr, 6258 .setattr = nfs4_proc_setattr,
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index baf73536bc04..8e672a2b2d69 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1260,6 +1260,25 @@ pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1260} 1260}
1261EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages); 1261EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
1262 1262
1263static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
1264{
1265 struct nfs_pageio_descriptor pgio;
1266
1267 put_lseg(data->lseg);
1268 data->lseg = NULL;
1269 dprintk("pnfs write error = %d\n", data->pnfs_error);
1270
1271 nfs_pageio_init_read_mds(&pgio, data->inode);
1272
1273 while (!list_empty(&data->pages)) {
1274 struct nfs_page *req = nfs_list_entry(data->pages.next);
1275
1276 nfs_list_remove_request(req);
1277 nfs_pageio_add_request(&pgio, req);
1278 }
1279 nfs_pageio_complete(&pgio);
1280}
1281
1263/* 1282/*
1264 * Called by non rpc-based layout drivers 1283 * Called by non rpc-based layout drivers
1265 */ 1284 */
@@ -1268,11 +1287,8 @@ void pnfs_ld_read_done(struct nfs_read_data *data)
1268 if (likely(!data->pnfs_error)) { 1287 if (likely(!data->pnfs_error)) {
1269 __nfs4_read_done_cb(data); 1288 __nfs4_read_done_cb(data);
1270 data->mds_ops->rpc_call_done(&data->task, data); 1289 data->mds_ops->rpc_call_done(&data->task, data);
1271 } else { 1290 } else
1272 put_lseg(data->lseg); 1291 pnfs_ld_handle_read_error(data);
1273 data->lseg = NULL;
1274 dprintk("pnfs write error = %d\n", data->pnfs_error);
1275 }
1276 data->mds_ops->rpc_release(data); 1292 data->mds_ops->rpc_release(data);
1277} 1293}
1278EXPORT_SYMBOL_GPL(pnfs_ld_read_done); 1294EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index ac40b8535d7e..f48125da198a 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -710,6 +710,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
710 .dentry_ops = &nfs_dentry_operations, 710 .dentry_ops = &nfs_dentry_operations,
711 .dir_inode_ops = &nfs_dir_inode_operations, 711 .dir_inode_ops = &nfs_dir_inode_operations,
712 .file_inode_ops = &nfs_file_inode_operations, 712 .file_inode_ops = &nfs_file_inode_operations,
713 .file_ops = &nfs_file_operations,
713 .getroot = nfs_proc_get_root, 714 .getroot = nfs_proc_get_root,
714 .getattr = nfs_proc_getattr, 715 .getattr = nfs_proc_getattr,
715 .setattr = nfs_proc_setattr, 716 .setattr = nfs_proc_setattr,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 8b48ec63f722..cfa175c223dc 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -109,7 +109,7 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
109 } 109 }
110} 110}
111 111
112static void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio, 112void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
113 struct inode *inode) 113 struct inode *inode)
114{ 114{
115 nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, 115 nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
@@ -534,23 +534,13 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
534static void nfs_readpage_release_full(void *calldata) 534static void nfs_readpage_release_full(void *calldata)
535{ 535{
536 struct nfs_read_data *data = calldata; 536 struct nfs_read_data *data = calldata;
537 struct nfs_pageio_descriptor pgio;
538 537
539 if (data->pnfs_error) {
540 nfs_pageio_init_read_mds(&pgio, data->inode);
541 pgio.pg_recoalesce = 1;
542 }
543 while (!list_empty(&data->pages)) { 538 while (!list_empty(&data->pages)) {
544 struct nfs_page *req = nfs_list_entry(data->pages.next); 539 struct nfs_page *req = nfs_list_entry(data->pages.next);
545 540
546 nfs_list_remove_request(req); 541 nfs_list_remove_request(req);
547 if (!data->pnfs_error) 542 nfs_readpage_release(req);
548 nfs_readpage_release(req);
549 else
550 nfs_pageio_add_request(&pgio, req);
551 } 543 }
552 if (data->pnfs_error)
553 nfs_pageio_complete(&pgio);
554 nfs_readdata_release(calldata); 544 nfs_readdata_release(calldata);
555} 545}
556 546
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 480b3b6bf71e..134777406ee3 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2787,43 +2787,18 @@ static void nfs_referral_loop_unprotect(void)
2787static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt, 2787static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
2788 const char *export_path) 2788 const char *export_path)
2789{ 2789{
2790 struct mnt_namespace *ns_private;
2791 struct super_block *s;
2792 struct dentry *dentry; 2790 struct dentry *dentry;
2793 struct path path; 2791 int ret = nfs_referral_loop_protect();
2794 int ret;
2795
2796 ns_private = create_mnt_ns(root_mnt);
2797 ret = PTR_ERR(ns_private);
2798 if (IS_ERR(ns_private))
2799 goto out_mntput;
2800
2801 ret = nfs_referral_loop_protect();
2802 if (ret != 0)
2803 goto out_put_mnt_ns;
2804 2792
2805 ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt, 2793 if (ret) {
2806 export_path, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); 2794 mntput(root_mnt);
2795 return ERR_PTR(ret);
2796 }
2807 2797
2798 dentry = mount_subtree(root_mnt, export_path);
2808 nfs_referral_loop_unprotect(); 2799 nfs_referral_loop_unprotect();
2809 put_mnt_ns(ns_private);
2810
2811 if (ret != 0)
2812 goto out_err;
2813
2814 s = path.mnt->mnt_sb;
2815 atomic_inc(&s->s_active);
2816 dentry = dget(path.dentry);
2817 2800
2818 path_put(&path);
2819 down_write(&s->s_umount);
2820 return dentry; 2801 return dentry;
2821out_put_mnt_ns:
2822 put_mnt_ns(ns_private);
2823out_mntput:
2824 mntput(root_mnt);
2825out_err:
2826 return ERR_PTR(ret);
2827} 2802}
2828 2803
2829static struct dentry *nfs4_try_mount(int flags, const char *dev_name, 2804static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index ed553c60de82..3165aebb43c8 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5699,7 +5699,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
5699 OCFS2_JOURNAL_ACCESS_WRITE); 5699 OCFS2_JOURNAL_ACCESS_WRITE);
5700 if (ret) { 5700 if (ret) {
5701 mlog_errno(ret); 5701 mlog_errno(ret);
5702 goto out; 5702 goto out_commit;
5703 } 5703 }
5704 5704
5705 dquot_free_space_nodirty(inode, 5705 dquot_free_space_nodirty(inode,
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index c1efe939c774..78b68af3b0e3 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -290,7 +290,15 @@ static int ocfs2_readpage(struct file *file, struct page *page)
290 } 290 }
291 291
292 if (down_read_trylock(&oi->ip_alloc_sem) == 0) { 292 if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
293 /*
294 * Unlock the page and cycle ip_alloc_sem so that we don't
295 * busyloop waiting for ip_alloc_sem to unlock
296 */
293 ret = AOP_TRUNCATED_PAGE; 297 ret = AOP_TRUNCATED_PAGE;
298 unlock_page(page);
299 unlock = 0;
300 down_read(&oi->ip_alloc_sem);
301 up_read(&oi->ip_alloc_sem);
294 goto out_inode_unlock; 302 goto out_inode_unlock;
295 } 303 }
296 304
@@ -563,6 +571,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
563{ 571{
564 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; 572 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
565 int level; 573 int level;
574 wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
566 575
567 /* this io's submitter should not have unlocked this before we could */ 576 /* this io's submitter should not have unlocked this before we could */
568 BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); 577 BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
@@ -570,6 +579,15 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
570 if (ocfs2_iocb_is_sem_locked(iocb)) 579 if (ocfs2_iocb_is_sem_locked(iocb))
571 ocfs2_iocb_clear_sem_locked(iocb); 580 ocfs2_iocb_clear_sem_locked(iocb);
572 581
582 if (ocfs2_iocb_is_unaligned_aio(iocb)) {
583 ocfs2_iocb_clear_unaligned_aio(iocb);
584
585 if (atomic_dec_and_test(&OCFS2_I(inode)->ip_unaligned_aio) &&
586 waitqueue_active(wq)) {
587 wake_up_all(wq);
588 }
589 }
590
573 ocfs2_iocb_clear_rw_locked(iocb); 591 ocfs2_iocb_clear_rw_locked(iocb);
574 592
575 level = ocfs2_iocb_rw_locked_level(iocb); 593 level = ocfs2_iocb_rw_locked_level(iocb);
@@ -863,6 +881,12 @@ struct ocfs2_write_ctxt {
863 struct page *w_target_page; 881 struct page *w_target_page;
864 882
865 /* 883 /*
884 * w_target_locked is used in the page_mkwrite path to indicate that
885 * w_target_page must not be unlocked in ocfs2_write_end_nolock.
886 */
887 unsigned int w_target_locked:1;
888
889 /*
866 * ocfs2_write_end() uses this to know what the real range to 890 * ocfs2_write_end() uses this to know what the real range to
867 * write in the target should be. 891 * write in the target should be.
868 */ 892 */
@@ -895,6 +919,24 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
895 919
896static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) 920static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
897{ 921{
922 int i;
923
924 /*
925 * w_target_locked is only set to true in the page_mkwrite() case.
926 * The intent is to allow us to lock the target page from write_begin()
927 * to write_end(). The caller must hold a ref on w_target_page.
928 */
929 if (wc->w_target_locked) {
930 BUG_ON(!wc->w_target_page);
931 for (i = 0; i < wc->w_num_pages; i++) {
932 if (wc->w_target_page == wc->w_pages[i]) {
933 wc->w_pages[i] = NULL;
934 break;
935 }
936 }
937 mark_page_accessed(wc->w_target_page);
938 page_cache_release(wc->w_target_page);
939 }
898 ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); 940 ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
899 941
900 brelse(wc->w_di_bh); 942 brelse(wc->w_di_bh);
@@ -1132,20 +1174,17 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
1132 */ 1174 */
1133 lock_page(mmap_page); 1175 lock_page(mmap_page);
1134 1176
1177 /* Exit and let the caller retry */
1135 if (mmap_page->mapping != mapping) { 1178 if (mmap_page->mapping != mapping) {
1179 WARN_ON(mmap_page->mapping);
1136 unlock_page(mmap_page); 1180 unlock_page(mmap_page);
1137 /* 1181 ret = -EAGAIN;
1138 * Sanity check - the locking in
1139 * ocfs2_pagemkwrite() should ensure
1140 * that this code doesn't trigger.
1141 */
1142 ret = -EINVAL;
1143 mlog_errno(ret);
1144 goto out; 1182 goto out;
1145 } 1183 }
1146 1184
1147 page_cache_get(mmap_page); 1185 page_cache_get(mmap_page);
1148 wc->w_pages[i] = mmap_page; 1186 wc->w_pages[i] = mmap_page;
1187 wc->w_target_locked = true;
1149 } else { 1188 } else {
1150 wc->w_pages[i] = find_or_create_page(mapping, index, 1189 wc->w_pages[i] = find_or_create_page(mapping, index,
1151 GFP_NOFS); 1190 GFP_NOFS);
@@ -1160,6 +1199,8 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
1160 wc->w_target_page = wc->w_pages[i]; 1199 wc->w_target_page = wc->w_pages[i];
1161 } 1200 }
1162out: 1201out:
1202 if (ret)
1203 wc->w_target_locked = false;
1163 return ret; 1204 return ret;
1164} 1205}
1165 1206
@@ -1817,11 +1858,23 @@ try_again:
1817 */ 1858 */
1818 ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, 1859 ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
1819 cluster_of_pages, mmap_page); 1860 cluster_of_pages, mmap_page);
1820 if (ret) { 1861 if (ret && ret != -EAGAIN) {
1821 mlog_errno(ret); 1862 mlog_errno(ret);
1822 goto out_quota; 1863 goto out_quota;
1823 } 1864 }
1824 1865
1866 /*
1867 * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
1868 * the target page. In this case, we exit with no error and no target
1869 * page. This will trigger the caller, page_mkwrite(), to re-try
1870 * the operation.
1871 */
1872 if (ret == -EAGAIN) {
1873 BUG_ON(wc->w_target_page);
1874 ret = 0;
1875 goto out_quota;
1876 }
1877
1825 ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos, 1878 ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
1826 len); 1879 len);
1827 if (ret) { 1880 if (ret) {
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 75cf3ad987a6..ffb2da370a99 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -78,6 +78,7 @@ enum ocfs2_iocb_lock_bits {
78 OCFS2_IOCB_RW_LOCK = 0, 78 OCFS2_IOCB_RW_LOCK = 0,
79 OCFS2_IOCB_RW_LOCK_LEVEL, 79 OCFS2_IOCB_RW_LOCK_LEVEL,
80 OCFS2_IOCB_SEM, 80 OCFS2_IOCB_SEM,
81 OCFS2_IOCB_UNALIGNED_IO,
81 OCFS2_IOCB_NUM_LOCKS 82 OCFS2_IOCB_NUM_LOCKS
82}; 83};
83 84
@@ -91,4 +92,17 @@ enum ocfs2_iocb_lock_bits {
91 clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) 92 clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
92#define ocfs2_iocb_is_sem_locked(iocb) \ 93#define ocfs2_iocb_is_sem_locked(iocb) \
93 test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) 94 test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
95
96#define ocfs2_iocb_set_unaligned_aio(iocb) \
97 set_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
98#define ocfs2_iocb_clear_unaligned_aio(iocb) \
99 clear_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
100#define ocfs2_iocb_is_unaligned_aio(iocb) \
101 test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
102
103#define OCFS2_IOEND_WQ_HASH_SZ 37
104#define ocfs2_ioend_wq(v) (&ocfs2__ioend_wq[((unsigned long)(v)) %\
105 OCFS2_IOEND_WQ_HASH_SZ])
106extern wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
107
94#endif /* OCFS2_FILE_H */ 108#endif /* OCFS2_FILE_H */
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 9a3e6bbff27b..a4e855e3690e 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -216,6 +216,7 @@ struct o2hb_region {
216 216
217 struct list_head hr_all_item; 217 struct list_head hr_all_item;
218 unsigned hr_unclean_stop:1, 218 unsigned hr_unclean_stop:1,
219 hr_aborted_start:1,
219 hr_item_pinned:1, 220 hr_item_pinned:1,
220 hr_item_dropped:1; 221 hr_item_dropped:1;
221 222
@@ -254,6 +255,10 @@ struct o2hb_region {
254 * a more complete api that doesn't lead to this sort of fragility. */ 255 * a more complete api that doesn't lead to this sort of fragility. */
255 atomic_t hr_steady_iterations; 256 atomic_t hr_steady_iterations;
256 257
258 /* terminate o2hb thread if it does not reach steady state
259 * (hr_steady_iterations == 0) within hr_unsteady_iterations */
260 atomic_t hr_unsteady_iterations;
261
257 char hr_dev_name[BDEVNAME_SIZE]; 262 char hr_dev_name[BDEVNAME_SIZE];
258 263
259 unsigned int hr_timeout_ms; 264 unsigned int hr_timeout_ms;
@@ -324,6 +329,10 @@ static void o2hb_write_timeout(struct work_struct *work)
324 329
325static void o2hb_arm_write_timeout(struct o2hb_region *reg) 330static void o2hb_arm_write_timeout(struct o2hb_region *reg)
326{ 331{
332 /* Arm writeout only after thread reaches steady state */
333 if (atomic_read(&reg->hr_steady_iterations) != 0)
334 return;
335
327 mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n", 336 mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",
328 O2HB_MAX_WRITE_TIMEOUT_MS); 337 O2HB_MAX_WRITE_TIMEOUT_MS);
329 338
@@ -537,9 +546,14 @@ static int o2hb_verify_crc(struct o2hb_region *reg,
537 return read == computed; 546 return read == computed;
538} 547}
539 548
540/* We want to make sure that nobody is heartbeating on top of us -- 549/*
541 * this will help detect an invalid configuration. */ 550 * Compare the slot data with what we wrote in the last iteration.
542static void o2hb_check_last_timestamp(struct o2hb_region *reg) 551 * If the match fails, print an appropriate error message. This is to
552 * detect errors like... another node hearting on the same slot,
553 * flaky device that is losing writes, etc.
554 * Returns 1 if check succeeds, 0 otherwise.
555 */
556static int o2hb_check_own_slot(struct o2hb_region *reg)
543{ 557{
544 struct o2hb_disk_slot *slot; 558 struct o2hb_disk_slot *slot;
545 struct o2hb_disk_heartbeat_block *hb_block; 559 struct o2hb_disk_heartbeat_block *hb_block;
@@ -548,13 +562,13 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
548 slot = &reg->hr_slots[o2nm_this_node()]; 562 slot = &reg->hr_slots[o2nm_this_node()];
549 /* Don't check on our 1st timestamp */ 563 /* Don't check on our 1st timestamp */
550 if (!slot->ds_last_time) 564 if (!slot->ds_last_time)
551 return; 565 return 0;
552 566
553 hb_block = slot->ds_raw_block; 567 hb_block = slot->ds_raw_block;
554 if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && 568 if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
555 le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && 569 le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
556 hb_block->hb_node == slot->ds_node_num) 570 hb_block->hb_node == slot->ds_node_num)
557 return; 571 return 1;
558 572
559#define ERRSTR1 "Another node is heartbeating on device" 573#define ERRSTR1 "Another node is heartbeating on device"
560#define ERRSTR2 "Heartbeat generation mismatch on device" 574#define ERRSTR2 "Heartbeat generation mismatch on device"
@@ -574,6 +588,8 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
574 (unsigned long long)slot->ds_last_time, hb_block->hb_node, 588 (unsigned long long)slot->ds_last_time, hb_block->hb_node,
575 (unsigned long long)le64_to_cpu(hb_block->hb_generation), 589 (unsigned long long)le64_to_cpu(hb_block->hb_generation),
576 (unsigned long long)le64_to_cpu(hb_block->hb_seq)); 590 (unsigned long long)le64_to_cpu(hb_block->hb_seq));
591
592 return 0;
577} 593}
578 594
579static inline void o2hb_prepare_block(struct o2hb_region *reg, 595static inline void o2hb_prepare_block(struct o2hb_region *reg,
@@ -719,17 +735,24 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
719 o2nm_node_put(node); 735 o2nm_node_put(node);
720} 736}
721 737
722static void o2hb_set_quorum_device(struct o2hb_region *reg, 738static void o2hb_set_quorum_device(struct o2hb_region *reg)
723 struct o2hb_disk_slot *slot)
724{ 739{
725 assert_spin_locked(&o2hb_live_lock);
726
727 if (!o2hb_global_heartbeat_active()) 740 if (!o2hb_global_heartbeat_active())
728 return; 741 return;
729 742
730 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) 743 /* Prevent race with o2hb_heartbeat_group_drop_item() */
744 if (kthread_should_stop())
745 return;
746
747 /* Tag region as quorum only after thread reaches steady state */
748 if (atomic_read(&reg->hr_steady_iterations) != 0)
731 return; 749 return;
732 750
751 spin_lock(&o2hb_live_lock);
752
753 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
754 goto unlock;
755
733 /* 756 /*
734 * A region can be added to the quorum only when it sees all 757 * A region can be added to the quorum only when it sees all
735 * live nodes heartbeat on it. In other words, the region has been 758 * live nodes heartbeat on it. In other words, the region has been
@@ -737,13 +760,10 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
737 */ 760 */
738 if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap, 761 if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap,
739 sizeof(o2hb_live_node_bitmap))) 762 sizeof(o2hb_live_node_bitmap)))
740 return; 763 goto unlock;
741
742 if (slot->ds_changed_samples < O2HB_LIVE_THRESHOLD)
743 return;
744 764
745 printk(KERN_NOTICE "o2hb: Region %s is now a quorum device\n", 765 printk(KERN_NOTICE "o2hb: Region %s (%s) is now a quorum device\n",
746 config_item_name(&reg->hr_item)); 766 config_item_name(&reg->hr_item), reg->hr_dev_name);
747 767
748 set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); 768 set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
749 769
@@ -754,6 +774,8 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
754 if (o2hb_pop_count(&o2hb_quorum_region_bitmap, 774 if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
755 O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF) 775 O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
756 o2hb_region_unpin(NULL); 776 o2hb_region_unpin(NULL);
777unlock:
778 spin_unlock(&o2hb_live_lock);
757} 779}
758 780
759static int o2hb_check_slot(struct o2hb_region *reg, 781static int o2hb_check_slot(struct o2hb_region *reg,
@@ -925,8 +947,6 @@ fire_callbacks:
925 slot->ds_equal_samples = 0; 947 slot->ds_equal_samples = 0;
926 } 948 }
927out: 949out:
928 o2hb_set_quorum_device(reg, slot);
929
930 spin_unlock(&o2hb_live_lock); 950 spin_unlock(&o2hb_live_lock);
931 951
932 o2hb_run_event_list(&event); 952 o2hb_run_event_list(&event);
@@ -957,7 +977,8 @@ static int o2hb_highest_node(unsigned long *nodes,
957 977
958static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) 978static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
959{ 979{
960 int i, ret, highest_node, change = 0; 980 int i, ret, highest_node;
981 int membership_change = 0, own_slot_ok = 0;
961 unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)]; 982 unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
962 unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 983 unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
963 struct o2hb_bio_wait_ctxt write_wc; 984 struct o2hb_bio_wait_ctxt write_wc;
@@ -966,7 +987,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
966 sizeof(configured_nodes)); 987 sizeof(configured_nodes));
967 if (ret) { 988 if (ret) {
968 mlog_errno(ret); 989 mlog_errno(ret);
969 return ret; 990 goto bail;
970 } 991 }
971 992
972 /* 993 /*
@@ -982,8 +1003,9 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
982 1003
983 highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES); 1004 highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
984 if (highest_node >= O2NM_MAX_NODES) { 1005 if (highest_node >= O2NM_MAX_NODES) {
985 mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n"); 1006 mlog(ML_NOTICE, "o2hb: No configured nodes found!\n");
986 return -EINVAL; 1007 ret = -EINVAL;
1008 goto bail;
987 } 1009 }
988 1010
989 /* No sense in reading the slots of nodes that don't exist 1011 /* No sense in reading the slots of nodes that don't exist
@@ -993,29 +1015,27 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
993 ret = o2hb_read_slots(reg, highest_node + 1); 1015 ret = o2hb_read_slots(reg, highest_node + 1);
994 if (ret < 0) { 1016 if (ret < 0) {
995 mlog_errno(ret); 1017 mlog_errno(ret);
996 return ret; 1018 goto bail;
997 } 1019 }
998 1020
999 /* With an up to date view of the slots, we can check that no 1021 /* With an up to date view of the slots, we can check that no
1000 * other node has been improperly configured to heartbeat in 1022 * other node has been improperly configured to heartbeat in
1001 * our slot. */ 1023 * our slot. */
1002 o2hb_check_last_timestamp(reg); 1024 own_slot_ok = o2hb_check_own_slot(reg);
1003 1025
1004 /* fill in the proper info for our next heartbeat */ 1026 /* fill in the proper info for our next heartbeat */
1005 o2hb_prepare_block(reg, reg->hr_generation); 1027 o2hb_prepare_block(reg, reg->hr_generation);
1006 1028
1007 /* And fire off the write. Note that we don't wait on this I/O
1008 * until later. */
1009 ret = o2hb_issue_node_write(reg, &write_wc); 1029 ret = o2hb_issue_node_write(reg, &write_wc);
1010 if (ret < 0) { 1030 if (ret < 0) {
1011 mlog_errno(ret); 1031 mlog_errno(ret);
1012 return ret; 1032 goto bail;
1013 } 1033 }
1014 1034
1015 i = -1; 1035 i = -1;
1016 while((i = find_next_bit(configured_nodes, 1036 while((i = find_next_bit(configured_nodes,
1017 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { 1037 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
1018 change |= o2hb_check_slot(reg, &reg->hr_slots[i]); 1038 membership_change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
1019 } 1039 }
1020 1040
1021 /* 1041 /*
@@ -1030,18 +1050,39 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
1030 * disk */ 1050 * disk */
1031 mlog(ML_ERROR, "Write error %d on device \"%s\"\n", 1051 mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
1032 write_wc.wc_error, reg->hr_dev_name); 1052 write_wc.wc_error, reg->hr_dev_name);
1033 return write_wc.wc_error; 1053 ret = write_wc.wc_error;
1054 goto bail;
1034 } 1055 }
1035 1056
1036 o2hb_arm_write_timeout(reg); 1057 /* Skip disarming the timeout if own slot has stale/bad data */
1058 if (own_slot_ok) {
1059 o2hb_set_quorum_device(reg);
1060 o2hb_arm_write_timeout(reg);
1061 }
1037 1062
1063bail:
1038 /* let the person who launched us know when things are steady */ 1064 /* let the person who launched us know when things are steady */
1039 if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) { 1065 if (atomic_read(&reg->hr_steady_iterations) != 0) {
1040 if (atomic_dec_and_test(&reg->hr_steady_iterations)) 1066 if (!ret && own_slot_ok && !membership_change) {
1067 if (atomic_dec_and_test(&reg->hr_steady_iterations))
1068 wake_up(&o2hb_steady_queue);
1069 }
1070 }
1071
1072 if (atomic_read(&reg->hr_steady_iterations) != 0) {
1073 if (atomic_dec_and_test(&reg->hr_unsteady_iterations)) {
1074 printk(KERN_NOTICE "o2hb: Unable to stabilize "
1075 "heartbeart on region %s (%s)\n",
1076 config_item_name(&reg->hr_item),
1077 reg->hr_dev_name);
1078 atomic_set(&reg->hr_steady_iterations, 0);
1079 reg->hr_aborted_start = 1;
1041 wake_up(&o2hb_steady_queue); 1080 wake_up(&o2hb_steady_queue);
1081 ret = -EIO;
1082 }
1042 } 1083 }
1043 1084
1044 return 0; 1085 return ret;
1045} 1086}
1046 1087
1047/* Subtract b from a, storing the result in a. a *must* have a larger 1088/* Subtract b from a, storing the result in a. a *must* have a larger
@@ -1095,7 +1136,8 @@ static int o2hb_thread(void *data)
1095 /* Pin node */ 1136 /* Pin node */
1096 o2nm_depend_this_node(); 1137 o2nm_depend_this_node();
1097 1138
1098 while (!kthread_should_stop() && !reg->hr_unclean_stop) { 1139 while (!kthread_should_stop() &&
1140 !reg->hr_unclean_stop && !reg->hr_aborted_start) {
1099 /* We track the time spent inside 1141 /* We track the time spent inside
1100 * o2hb_do_disk_heartbeat so that we avoid more than 1142 * o2hb_do_disk_heartbeat so that we avoid more than
1101 * hr_timeout_ms between disk writes. On busy systems 1143 * hr_timeout_ms between disk writes. On busy systems
@@ -1103,10 +1145,7 @@ static int o2hb_thread(void *data)
1103 * likely to time itself out. */ 1145 * likely to time itself out. */
1104 do_gettimeofday(&before_hb); 1146 do_gettimeofday(&before_hb);
1105 1147
1106 i = 0; 1148 ret = o2hb_do_disk_heartbeat(reg);
1107 do {
1108 ret = o2hb_do_disk_heartbeat(reg);
1109 } while (ret && ++i < 2);
1110 1149
1111 do_gettimeofday(&after_hb); 1150 do_gettimeofday(&after_hb);
1112 elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb); 1151 elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
@@ -1117,7 +1156,8 @@ static int o2hb_thread(void *data)
1117 after_hb.tv_sec, (unsigned long) after_hb.tv_usec, 1156 after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
1118 elapsed_msec); 1157 elapsed_msec);
1119 1158
1120 if (elapsed_msec < reg->hr_timeout_ms) { 1159 if (!kthread_should_stop() &&
1160 elapsed_msec < reg->hr_timeout_ms) {
1121 /* the kthread api has blocked signals for us so no 1161 /* the kthread api has blocked signals for us so no
1122 * need to record the return value. */ 1162 * need to record the return value. */
1123 msleep_interruptible(reg->hr_timeout_ms - elapsed_msec); 1163 msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
@@ -1134,20 +1174,20 @@ static int o2hb_thread(void *data)
1134 * to timeout on this region when we could just as easily 1174 * to timeout on this region when we could just as easily
1135 * write a clear generation - thus indicating to them that 1175 * write a clear generation - thus indicating to them that
1136 * this node has left this region. 1176 * this node has left this region.
1137 * 1177 */
1138 * XXX: Should we skip this on unclean_stop? */ 1178 if (!reg->hr_unclean_stop && !reg->hr_aborted_start) {
1139 o2hb_prepare_block(reg, 0); 1179 o2hb_prepare_block(reg, 0);
1140 ret = o2hb_issue_node_write(reg, &write_wc); 1180 ret = o2hb_issue_node_write(reg, &write_wc);
1141 if (ret == 0) { 1181 if (ret == 0)
1142 o2hb_wait_on_io(reg, &write_wc); 1182 o2hb_wait_on_io(reg, &write_wc);
1143 } else { 1183 else
1144 mlog_errno(ret); 1184 mlog_errno(ret);
1145 } 1185 }
1146 1186
1147 /* Unpin node */ 1187 /* Unpin node */
1148 o2nm_undepend_this_node(); 1188 o2nm_undepend_this_node();
1149 1189
1150 mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n"); 1190 mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n");
1151 1191
1152 return 0; 1192 return 0;
1153} 1193}
@@ -1158,6 +1198,7 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
1158 struct o2hb_debug_buf *db = inode->i_private; 1198 struct o2hb_debug_buf *db = inode->i_private;
1159 struct o2hb_region *reg; 1199 struct o2hb_region *reg;
1160 unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; 1200 unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
1201 unsigned long lts;
1161 char *buf = NULL; 1202 char *buf = NULL;
1162 int i = -1; 1203 int i = -1;
1163 int out = 0; 1204 int out = 0;
@@ -1194,9 +1235,11 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
1194 1235
1195 case O2HB_DB_TYPE_REGION_ELAPSED_TIME: 1236 case O2HB_DB_TYPE_REGION_ELAPSED_TIME:
1196 reg = (struct o2hb_region *)db->db_data; 1237 reg = (struct o2hb_region *)db->db_data;
1197 out += snprintf(buf + out, PAGE_SIZE - out, "%u\n", 1238 lts = reg->hr_last_timeout_start;
1198 jiffies_to_msecs(jiffies - 1239 /* If 0, it has never been set before */
1199 reg->hr_last_timeout_start)); 1240 if (lts)
1241 lts = jiffies_to_msecs(jiffies - lts);
1242 out += snprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts);
1200 goto done; 1243 goto done;
1201 1244
1202 case O2HB_DB_TYPE_REGION_PINNED: 1245 case O2HB_DB_TYPE_REGION_PINNED:
@@ -1426,6 +1469,8 @@ static void o2hb_region_release(struct config_item *item)
1426 struct page *page; 1469 struct page *page;
1427 struct o2hb_region *reg = to_o2hb_region(item); 1470 struct o2hb_region *reg = to_o2hb_region(item);
1428 1471
1472 mlog(ML_HEARTBEAT, "hb region release (%s)\n", reg->hr_dev_name);
1473
1429 if (reg->hr_tmp_block) 1474 if (reg->hr_tmp_block)
1430 kfree(reg->hr_tmp_block); 1475 kfree(reg->hr_tmp_block);
1431 1476
@@ -1792,7 +1837,10 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
1792 live_threshold <<= 1; 1837 live_threshold <<= 1;
1793 spin_unlock(&o2hb_live_lock); 1838 spin_unlock(&o2hb_live_lock);
1794 } 1839 }
1795 atomic_set(&reg->hr_steady_iterations, live_threshold + 1); 1840 ++live_threshold;
1841 atomic_set(&reg->hr_steady_iterations, live_threshold);
1842 /* unsteady_iterations is double the steady_iterations */
1843 atomic_set(&reg->hr_unsteady_iterations, (live_threshold << 1));
1796 1844
1797 hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", 1845 hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
1798 reg->hr_item.ci_name); 1846 reg->hr_item.ci_name);
@@ -1809,14 +1857,12 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
1809 ret = wait_event_interruptible(o2hb_steady_queue, 1857 ret = wait_event_interruptible(o2hb_steady_queue,
1810 atomic_read(&reg->hr_steady_iterations) == 0); 1858 atomic_read(&reg->hr_steady_iterations) == 0);
1811 if (ret) { 1859 if (ret) {
1812 /* We got interrupted (hello ptrace!). Clean up */ 1860 atomic_set(&reg->hr_steady_iterations, 0);
1813 spin_lock(&o2hb_live_lock); 1861 reg->hr_aborted_start = 1;
1814 hb_task = reg->hr_task; 1862 }
1815 reg->hr_task = NULL;
1816 spin_unlock(&o2hb_live_lock);
1817 1863
1818 if (hb_task) 1864 if (reg->hr_aborted_start) {
1819 kthread_stop(hb_task); 1865 ret = -EIO;
1820 goto out; 1866 goto out;
1821 } 1867 }
1822 1868
@@ -1833,8 +1879,8 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
1833 ret = -EIO; 1879 ret = -EIO;
1834 1880
1835 if (hb_task && o2hb_global_heartbeat_active()) 1881 if (hb_task && o2hb_global_heartbeat_active())
1836 printk(KERN_NOTICE "o2hb: Heartbeat started on region %s\n", 1882 printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n",
1837 config_item_name(&reg->hr_item)); 1883 config_item_name(&reg->hr_item), reg->hr_dev_name);
1838 1884
1839out: 1885out:
1840 if (filp) 1886 if (filp)
@@ -2092,13 +2138,6 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
2092 2138
2093 /* stop the thread when the user removes the region dir */ 2139 /* stop the thread when the user removes the region dir */
2094 spin_lock(&o2hb_live_lock); 2140 spin_lock(&o2hb_live_lock);
2095 if (o2hb_global_heartbeat_active()) {
2096 clear_bit(reg->hr_region_num, o2hb_region_bitmap);
2097 clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
2098 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
2099 quorum_region = 1;
2100 clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
2101 }
2102 hb_task = reg->hr_task; 2141 hb_task = reg->hr_task;
2103 reg->hr_task = NULL; 2142 reg->hr_task = NULL;
2104 reg->hr_item_dropped = 1; 2143 reg->hr_item_dropped = 1;
@@ -2107,19 +2146,30 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
2107 if (hb_task) 2146 if (hb_task)
2108 kthread_stop(hb_task); 2147 kthread_stop(hb_task);
2109 2148
2149 if (o2hb_global_heartbeat_active()) {
2150 spin_lock(&o2hb_live_lock);
2151 clear_bit(reg->hr_region_num, o2hb_region_bitmap);
2152 clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
2153 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
2154 quorum_region = 1;
2155 clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
2156 spin_unlock(&o2hb_live_lock);
2157 printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%s)\n",
2158 ((atomic_read(&reg->hr_steady_iterations) == 0) ?
2159 "stopped" : "start aborted"), config_item_name(item),
2160 reg->hr_dev_name);
2161 }
2162
2110 /* 2163 /*
2111 * If we're racing a dev_write(), we need to wake them. They will 2164 * If we're racing a dev_write(), we need to wake them. They will
2112 * check reg->hr_task 2165 * check reg->hr_task
2113 */ 2166 */
2114 if (atomic_read(&reg->hr_steady_iterations) != 0) { 2167 if (atomic_read(&reg->hr_steady_iterations) != 0) {
2168 reg->hr_aborted_start = 1;
2115 atomic_set(&reg->hr_steady_iterations, 0); 2169 atomic_set(&reg->hr_steady_iterations, 0);
2116 wake_up(&o2hb_steady_queue); 2170 wake_up(&o2hb_steady_queue);
2117 } 2171 }
2118 2172
2119 if (o2hb_global_heartbeat_active())
2120 printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n",
2121 config_item_name(&reg->hr_item));
2122
2123 config_item_put(item); 2173 config_item_put(item);
2124 2174
2125 if (!o2hb_global_heartbeat_active() || !quorum_region) 2175 if (!o2hb_global_heartbeat_active() || !quorum_region)
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index 3a5835904b3d..dc45deb19e68 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -47,6 +47,7 @@
47#define SC_DEBUG_NAME "sock_containers" 47#define SC_DEBUG_NAME "sock_containers"
48#define NST_DEBUG_NAME "send_tracking" 48#define NST_DEBUG_NAME "send_tracking"
49#define STATS_DEBUG_NAME "stats" 49#define STATS_DEBUG_NAME "stats"
50#define NODES_DEBUG_NAME "connected_nodes"
50 51
51#define SHOW_SOCK_CONTAINERS 0 52#define SHOW_SOCK_CONTAINERS 0
52#define SHOW_SOCK_STATS 1 53#define SHOW_SOCK_STATS 1
@@ -55,6 +56,7 @@ static struct dentry *o2net_dentry;
55static struct dentry *sc_dentry; 56static struct dentry *sc_dentry;
56static struct dentry *nst_dentry; 57static struct dentry *nst_dentry;
57static struct dentry *stats_dentry; 58static struct dentry *stats_dentry;
59static struct dentry *nodes_dentry;
58 60
59static DEFINE_SPINLOCK(o2net_debug_lock); 61static DEFINE_SPINLOCK(o2net_debug_lock);
60 62
@@ -491,53 +493,87 @@ static const struct file_operations sc_seq_fops = {
491 .release = sc_fop_release, 493 .release = sc_fop_release,
492}; 494};
493 495
494int o2net_debugfs_init(void) 496static int o2net_fill_bitmap(char *buf, int len)
495{ 497{
496 o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL); 498 unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
497 if (!o2net_dentry) { 499 int i = -1, out = 0;
498 mlog_errno(-ENOMEM);
499 goto bail;
500 }
501 500
502 nst_dentry = debugfs_create_file(NST_DEBUG_NAME, S_IFREG|S_IRUSR, 501 o2net_fill_node_map(map, sizeof(map));
503 o2net_dentry, NULL,
504 &nst_seq_fops);
505 if (!nst_dentry) {
506 mlog_errno(-ENOMEM);
507 goto bail;
508 }
509 502
510 sc_dentry = debugfs_create_file(SC_DEBUG_NAME, S_IFREG|S_IRUSR, 503 while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
511 o2net_dentry, NULL, 504 out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
512 &sc_seq_fops); 505 out += snprintf(buf + out, PAGE_SIZE - out, "\n");
513 if (!sc_dentry) {
514 mlog_errno(-ENOMEM);
515 goto bail;
516 }
517 506
518 stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR, 507 return out;
519 o2net_dentry, NULL, 508}
520 &stats_seq_fops); 509
521 if (!stats_dentry) { 510static int nodes_fop_open(struct inode *inode, struct file *file)
522 mlog_errno(-ENOMEM); 511{
523 goto bail; 512 char *buf;
524 } 513
514 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
515 if (!buf)
516 return -ENOMEM;
517
518 i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE));
519
520 file->private_data = buf;
525 521
526 return 0; 522 return 0;
527bail:
528 debugfs_remove(stats_dentry);
529 debugfs_remove(sc_dentry);
530 debugfs_remove(nst_dentry);
531 debugfs_remove(o2net_dentry);
532 return -ENOMEM;
533} 523}
534 524
525static int o2net_debug_release(struct inode *inode, struct file *file)
526{
527 kfree(file->private_data);
528 return 0;
529}
530
531static ssize_t o2net_debug_read(struct file *file, char __user *buf,
532 size_t nbytes, loff_t *ppos)
533{
534 return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
535 i_size_read(file->f_mapping->host));
536}
537
538static const struct file_operations nodes_fops = {
539 .open = nodes_fop_open,
540 .release = o2net_debug_release,
541 .read = o2net_debug_read,
542 .llseek = generic_file_llseek,
543};
544
535void o2net_debugfs_exit(void) 545void o2net_debugfs_exit(void)
536{ 546{
547 debugfs_remove(nodes_dentry);
537 debugfs_remove(stats_dentry); 548 debugfs_remove(stats_dentry);
538 debugfs_remove(sc_dentry); 549 debugfs_remove(sc_dentry);
539 debugfs_remove(nst_dentry); 550 debugfs_remove(nst_dentry);
540 debugfs_remove(o2net_dentry); 551 debugfs_remove(o2net_dentry);
541} 552}
542 553
554int o2net_debugfs_init(void)
555{
556 mode_t mode = S_IFREG|S_IRUSR;
557
558 o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
559 if (o2net_dentry)
560 nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode,
561 o2net_dentry, NULL, &nst_seq_fops);
562 if (nst_dentry)
563 sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode,
564 o2net_dentry, NULL, &sc_seq_fops);
565 if (sc_dentry)
566 stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode,
567 o2net_dentry, NULL, &stats_seq_fops);
568 if (stats_dentry)
569 nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode,
570 o2net_dentry, NULL, &nodes_fops);
571 if (nodes_dentry)
572 return 0;
573
574 o2net_debugfs_exit();
575 mlog_errno(-ENOMEM);
576 return -ENOMEM;
577}
578
543#endif /* CONFIG_DEBUG_FS */ 579#endif /* CONFIG_DEBUG_FS */
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index ad7d0c155de4..044e7b58d31c 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -546,7 +546,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
546 } 546 }
547 547
548 if (was_valid && !valid) { 548 if (was_valid && !valid) {
549 printk(KERN_NOTICE "o2net: no longer connected to " 549 printk(KERN_NOTICE "o2net: No longer connected to "
550 SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); 550 SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
551 o2net_complete_nodes_nsw(nn); 551 o2net_complete_nodes_nsw(nn);
552 } 552 }
@@ -556,7 +556,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
556 cancel_delayed_work(&nn->nn_connect_expired); 556 cancel_delayed_work(&nn->nn_connect_expired);
557 printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n", 557 printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
558 o2nm_this_node() > sc->sc_node->nd_num ? 558 o2nm_this_node() > sc->sc_node->nd_num ?
559 "connected to" : "accepted connection from", 559 "Connected to" : "Accepted connection from",
560 SC_NODEF_ARGS(sc)); 560 SC_NODEF_ARGS(sc));
561 } 561 }
562 562
@@ -644,7 +644,7 @@ static void o2net_state_change(struct sock *sk)
644 o2net_sc_queue_work(sc, &sc->sc_connect_work); 644 o2net_sc_queue_work(sc, &sc->sc_connect_work);
645 break; 645 break;
646 default: 646 default:
647 printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT 647 printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT
648 " shutdown, state %d\n", 648 " shutdown, state %d\n",
649 SC_NODEF_ARGS(sc), sk->sk_state); 649 SC_NODEF_ARGS(sc), sk->sk_state);
650 o2net_sc_queue_work(sc, &sc->sc_shutdown_work); 650 o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
@@ -1035,6 +1035,25 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
1035 return ret; 1035 return ret;
1036} 1036}
1037 1037
1038/* Get a map of all nodes to which this node is currently connected */
1039void o2net_fill_node_map(unsigned long *map, unsigned bytes)
1040{
1041 struct o2net_sock_container *sc;
1042 int node, ret;
1043
1044 BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));
1045
1046 memset(map, 0, bytes);
1047 for (node = 0; node < O2NM_MAX_NODES; ++node) {
1048 o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret);
1049 if (!ret) {
1050 set_bit(node, map);
1051 sc_put(sc);
1052 }
1053 }
1054}
1055EXPORT_SYMBOL_GPL(o2net_fill_node_map);
1056
1038int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, 1057int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
1039 size_t caller_veclen, u8 target_node, int *status) 1058 size_t caller_veclen, u8 target_node, int *status)
1040{ 1059{
@@ -1285,11 +1304,11 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
1285 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); 1304 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1286 1305
1287 if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) { 1306 if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) {
1288 mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol " 1307 printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net "
1289 "version %llu but %llu is required, disconnecting\n", 1308 "protocol version %llu but %llu is required. "
1290 SC_NODEF_ARGS(sc), 1309 "Disconnecting.\n", SC_NODEF_ARGS(sc),
1291 (unsigned long long)be64_to_cpu(hand->protocol_version), 1310 (unsigned long long)be64_to_cpu(hand->protocol_version),
1292 O2NET_PROTOCOL_VERSION); 1311 O2NET_PROTOCOL_VERSION);
1293 1312
1294 /* don't bother reconnecting if its the wrong version. */ 1313 /* don't bother reconnecting if its the wrong version. */
1295 o2net_ensure_shutdown(nn, sc, -ENOTCONN); 1314 o2net_ensure_shutdown(nn, sc, -ENOTCONN);
@@ -1303,33 +1322,33 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
1303 */ 1322 */
1304 if (be32_to_cpu(hand->o2net_idle_timeout_ms) != 1323 if (be32_to_cpu(hand->o2net_idle_timeout_ms) !=
1305 o2net_idle_timeout()) { 1324 o2net_idle_timeout()) {
1306 mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of " 1325 printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network "
1307 "%u ms, but we use %u ms locally. disconnecting\n", 1326 "idle timeout of %u ms, but we use %u ms locally. "
1308 SC_NODEF_ARGS(sc), 1327 "Disconnecting.\n", SC_NODEF_ARGS(sc),
1309 be32_to_cpu(hand->o2net_idle_timeout_ms), 1328 be32_to_cpu(hand->o2net_idle_timeout_ms),
1310 o2net_idle_timeout()); 1329 o2net_idle_timeout());
1311 o2net_ensure_shutdown(nn, sc, -ENOTCONN); 1330 o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1312 return -1; 1331 return -1;
1313 } 1332 }
1314 1333
1315 if (be32_to_cpu(hand->o2net_keepalive_delay_ms) != 1334 if (be32_to_cpu(hand->o2net_keepalive_delay_ms) !=
1316 o2net_keepalive_delay()) { 1335 o2net_keepalive_delay()) {
1317 mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of " 1336 printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive "
1318 "%u ms, but we use %u ms locally. disconnecting\n", 1337 "delay of %u ms, but we use %u ms locally. "
1319 SC_NODEF_ARGS(sc), 1338 "Disconnecting.\n", SC_NODEF_ARGS(sc),
1320 be32_to_cpu(hand->o2net_keepalive_delay_ms), 1339 be32_to_cpu(hand->o2net_keepalive_delay_ms),
1321 o2net_keepalive_delay()); 1340 o2net_keepalive_delay());
1322 o2net_ensure_shutdown(nn, sc, -ENOTCONN); 1341 o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1323 return -1; 1342 return -1;
1324 } 1343 }
1325 1344
1326 if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) != 1345 if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) !=
1327 O2HB_MAX_WRITE_TIMEOUT_MS) { 1346 O2HB_MAX_WRITE_TIMEOUT_MS) {
1328 mlog(ML_NOTICE, SC_NODEF_FMT " uses a heartbeat timeout of " 1347 printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat "
1329 "%u ms, but we use %u ms locally. disconnecting\n", 1348 "timeout of %u ms, but we use %u ms locally. "
1330 SC_NODEF_ARGS(sc), 1349 "Disconnecting.\n", SC_NODEF_ARGS(sc),
1331 be32_to_cpu(hand->o2hb_heartbeat_timeout_ms), 1350 be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
1332 O2HB_MAX_WRITE_TIMEOUT_MS); 1351 O2HB_MAX_WRITE_TIMEOUT_MS);
1333 o2net_ensure_shutdown(nn, sc, -ENOTCONN); 1352 o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1334 return -1; 1353 return -1;
1335 } 1354 }
@@ -1540,28 +1559,16 @@ static void o2net_idle_timer(unsigned long data)
1540{ 1559{
1541 struct o2net_sock_container *sc = (struct o2net_sock_container *)data; 1560 struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
1542 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); 1561 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1543
1544#ifdef CONFIG_DEBUG_FS 1562#ifdef CONFIG_DEBUG_FS
1545 ktime_t now = ktime_get(); 1563 unsigned long msecs = ktime_to_ms(ktime_get()) -
1564 ktime_to_ms(sc->sc_tv_timer);
1565#else
1566 unsigned long msecs = o2net_idle_timeout();
1546#endif 1567#endif
1547 1568
1548 printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " 1569 printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been "
1549 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), 1570 "idle for %lu.%lu secs, shutting it down.\n", SC_NODEF_ARGS(sc),
1550 o2net_idle_timeout() / 1000, 1571 msecs / 1000, msecs % 1000);
1551 o2net_idle_timeout() % 1000);
1552
1553#ifdef CONFIG_DEBUG_FS
1554 mlog(ML_NOTICE, "Here are some times that might help debug the "
1555 "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, "
1556 "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n",
1557 (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now),
1558 (long long)ktime_to_us(sc->sc_tv_data_ready),
1559 (long long)ktime_to_us(sc->sc_tv_advance_start),
1560 (long long)ktime_to_us(sc->sc_tv_advance_stop),
1561 sc->sc_msg_key, sc->sc_msg_type,
1562 (long long)ktime_to_us(sc->sc_tv_func_start),
1563 (long long)ktime_to_us(sc->sc_tv_func_stop));
1564#endif
1565 1572
1566 /* 1573 /*
1567 * Initialize the nn_timeout so that the next connection attempt 1574 * Initialize the nn_timeout so that the next connection attempt
@@ -1694,8 +1701,8 @@ static void o2net_start_connect(struct work_struct *work)
1694 1701
1695out: 1702out:
1696 if (ret) { 1703 if (ret) {
1697 mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed " 1704 printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT
1698 "with errno %d\n", SC_NODEF_ARGS(sc), ret); 1705 " failed with errno %d\n", SC_NODEF_ARGS(sc), ret);
1699 /* 0 err so that another will be queued and attempted 1706 /* 0 err so that another will be queued and attempted
1700 * from set_nn_state */ 1707 * from set_nn_state */
1701 if (sc) 1708 if (sc)
@@ -1718,8 +1725,8 @@ static void o2net_connect_expired(struct work_struct *work)
1718 1725
1719 spin_lock(&nn->nn_lock); 1726 spin_lock(&nn->nn_lock);
1720 if (!nn->nn_sc_valid) { 1727 if (!nn->nn_sc_valid) {
1721 mlog(ML_ERROR, "no connection established with node %u after " 1728 printk(KERN_NOTICE "o2net: No connection established with "
1722 "%u.%u seconds, giving up and returning errors.\n", 1729 "node %u after %u.%u seconds, giving up.\n",
1723 o2net_num_from_nn(nn), 1730 o2net_num_from_nn(nn),
1724 o2net_idle_timeout() / 1000, 1731 o2net_idle_timeout() / 1000,
1725 o2net_idle_timeout() % 1000); 1732 o2net_idle_timeout() % 1000);
@@ -1862,21 +1869,21 @@ static int o2net_accept_one(struct socket *sock)
1862 1869
1863 node = o2nm_get_node_by_ip(sin.sin_addr.s_addr); 1870 node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
1864 if (node == NULL) { 1871 if (node == NULL) {
1865 mlog(ML_NOTICE, "attempt to connect from unknown node at %pI4:%d\n", 1872 printk(KERN_NOTICE "o2net: Attempt to connect from unknown "
1866 &sin.sin_addr.s_addr, ntohs(sin.sin_port)); 1873 "node at %pI4:%d\n", &sin.sin_addr.s_addr,
1874 ntohs(sin.sin_port));
1867 ret = -EINVAL; 1875 ret = -EINVAL;
1868 goto out; 1876 goto out;
1869 } 1877 }
1870 1878
1871 if (o2nm_this_node() >= node->nd_num) { 1879 if (o2nm_this_node() >= node->nd_num) {
1872 local_node = o2nm_get_node_by_num(o2nm_this_node()); 1880 local_node = o2nm_get_node_by_num(o2nm_this_node());
1873 mlog(ML_NOTICE, "unexpected connect attempt seen at node '%s' (" 1881 printk(KERN_NOTICE "o2net: Unexpected connect attempt seen "
1874 "%u, %pI4:%d) from node '%s' (%u, %pI4:%d)\n", 1882 "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
1875 local_node->nd_name, local_node->nd_num, 1883 "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
1876 &(local_node->nd_ipv4_address), 1884 &(local_node->nd_ipv4_address),
1877 ntohs(local_node->nd_ipv4_port), 1885 ntohs(local_node->nd_ipv4_port), node->nd_name,
1878 node->nd_name, node->nd_num, &sin.sin_addr.s_addr, 1886 node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
1879 ntohs(sin.sin_port));
1880 ret = -EINVAL; 1887 ret = -EINVAL;
1881 goto out; 1888 goto out;
1882 } 1889 }
@@ -1901,10 +1908,10 @@ static int o2net_accept_one(struct socket *sock)
1901 ret = 0; 1908 ret = 0;
1902 spin_unlock(&nn->nn_lock); 1909 spin_unlock(&nn->nn_lock);
1903 if (ret) { 1910 if (ret) {
1904 mlog(ML_NOTICE, "attempt to connect from node '%s' at " 1911 printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' "
1905 "%pI4:%d but it already has an open connection\n", 1912 "at %pI4:%d but it already has an open connection\n",
1906 node->nd_name, &sin.sin_addr.s_addr, 1913 node->nd_name, &sin.sin_addr.s_addr,
1907 ntohs(sin.sin_port)); 1914 ntohs(sin.sin_port));
1908 goto out; 1915 goto out;
1909 } 1916 }
1910 1917
@@ -1984,7 +1991,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
1984 1991
1985 ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); 1992 ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
1986 if (ret < 0) { 1993 if (ret < 0) {
1987 mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret); 1994 printk(KERN_ERR "o2net: Error %d while creating socket\n", ret);
1988 goto out; 1995 goto out;
1989 } 1996 }
1990 1997
@@ -2001,16 +2008,15 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
2001 sock->sk->sk_reuse = 1; 2008 sock->sk->sk_reuse = 1;
2002 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); 2009 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
2003 if (ret < 0) { 2010 if (ret < 0) {
2004 mlog(ML_ERROR, "unable to bind socket at %pI4:%u, " 2011 printk(KERN_ERR "o2net: Error %d while binding socket at "
2005 "ret=%d\n", &addr, ntohs(port), ret); 2012 "%pI4:%u\n", ret, &addr, ntohs(port));
2006 goto out; 2013 goto out;
2007 } 2014 }
2008 2015
2009 ret = sock->ops->listen(sock, 64); 2016 ret = sock->ops->listen(sock, 64);
2010 if (ret < 0) { 2017 if (ret < 0)
2011 mlog(ML_ERROR, "unable to listen on %pI4:%u, ret=%d\n", 2018 printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n",
2012 &addr, ntohs(port), ret); 2019 ret, &addr, ntohs(port));
2013 }
2014 2020
2015out: 2021out:
2016 if (ret) { 2022 if (ret) {
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h
index fd6179eb26d4..5bada2a69b50 100644
--- a/fs/ocfs2/cluster/tcp.h
+++ b/fs/ocfs2/cluster/tcp.h
@@ -106,6 +106,8 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
106 struct list_head *unreg_list); 106 struct list_head *unreg_list);
107void o2net_unregister_handler_list(struct list_head *list); 107void o2net_unregister_handler_list(struct list_head *list);
108 108
109void o2net_fill_node_map(unsigned long *map, unsigned bytes);
110
109struct o2nm_node; 111struct o2nm_node;
110int o2net_register_hb_callbacks(void); 112int o2net_register_hb_callbacks(void);
111void o2net_unregister_hb_callbacks(void); 113void o2net_unregister_hb_callbacks(void);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index e2878b5895fb..8fe4e2892ab9 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1184,8 +1184,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
1184 if (pde) 1184 if (pde)
1185 le16_add_cpu(&pde->rec_len, 1185 le16_add_cpu(&pde->rec_len,
1186 le16_to_cpu(de->rec_len)); 1186 le16_to_cpu(de->rec_len));
1187 else 1187 de->inode = 0;
1188 de->inode = 0;
1189 dir->i_version++; 1188 dir->i_version++;
1190 ocfs2_journal_dirty(handle, bh); 1189 ocfs2_journal_dirty(handle, bh);
1191 goto bail; 1190 goto bail;
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index d602abb51b61..a5952ceecba5 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -859,8 +859,8 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
859void dlm_wait_for_recovery(struct dlm_ctxt *dlm); 859void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
860void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); 860void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
861int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node); 861int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
862int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout); 862void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
863int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout); 863void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
864 864
865void dlm_put(struct dlm_ctxt *dlm); 865void dlm_put(struct dlm_ctxt *dlm);
866struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); 866struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
@@ -877,9 +877,8 @@ static inline void dlm_lockres_get(struct dlm_lock_resource *res)
877 kref_get(&res->refs); 877 kref_get(&res->refs);
878} 878}
879void dlm_lockres_put(struct dlm_lock_resource *res); 879void dlm_lockres_put(struct dlm_lock_resource *res);
880void __dlm_unhash_lockres(struct dlm_lock_resource *res); 880void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
881void __dlm_insert_lockres(struct dlm_ctxt *dlm, 881void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
882 struct dlm_lock_resource *res);
883struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, 882struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
884 const char *name, 883 const char *name,
885 unsigned int len, 884 unsigned int len,
@@ -902,46 +901,15 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
902 const char *name, 901 const char *name,
903 unsigned int namelen); 902 unsigned int namelen);
904 903
905#define dlm_lockres_set_refmap_bit(bit,res) \ 904void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
906 __dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__) 905 struct dlm_lock_resource *res, int bit);
907#define dlm_lockres_clear_refmap_bit(bit,res) \ 906void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
908 __dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__) 907 struct dlm_lock_resource *res, int bit);
909 908
910static inline void __dlm_lockres_set_refmap_bit(int bit, 909void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
911 struct dlm_lock_resource *res, 910 struct dlm_lock_resource *res);
912 const char *file, 911void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
913 int line) 912 struct dlm_lock_resource *res);
914{
915 //printk("%s:%d:%.*s: setting bit %d\n", file, line,
916 // res->lockname.len, res->lockname.name, bit);
917 set_bit(bit, res->refmap);
918}
919
920static inline void __dlm_lockres_clear_refmap_bit(int bit,
921 struct dlm_lock_resource *res,
922 const char *file,
923 int line)
924{
925 //printk("%s:%d:%.*s: clearing bit %d\n", file, line,
926 // res->lockname.len, res->lockname.name, bit);
927 clear_bit(bit, res->refmap);
928}
929
930void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
931 struct dlm_lock_resource *res,
932 const char *file,
933 int line);
934void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
935 struct dlm_lock_resource *res,
936 int new_lockres,
937 const char *file,
938 int line);
939#define dlm_lockres_drop_inflight_ref(d,r) \
940 __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__)
941#define dlm_lockres_grab_inflight_ref(d,r) \
942 __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__)
943#define dlm_lockres_grab_inflight_ref_new(d,r) \
944 __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__)
945 913
946void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 914void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
947void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 915void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 6ed6b95dcf93..92f2ead0fab6 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -157,16 +157,18 @@ static int dlm_protocol_compare(struct dlm_protocol_version *existing,
157 157
158static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); 158static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
159 159
160void __dlm_unhash_lockres(struct dlm_lock_resource *lockres) 160void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
161{ 161{
162 if (!hlist_unhashed(&lockres->hash_node)) { 162 if (hlist_unhashed(&res->hash_node))
163 hlist_del_init(&lockres->hash_node); 163 return;
164 dlm_lockres_put(lockres); 164
165 } 165 mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,
166 res->lockname.name);
167 hlist_del_init(&res->hash_node);
168 dlm_lockres_put(res);
166} 169}
167 170
168void __dlm_insert_lockres(struct dlm_ctxt *dlm, 171void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
169 struct dlm_lock_resource *res)
170{ 172{
171 struct hlist_head *bucket; 173 struct hlist_head *bucket;
172 struct qstr *q; 174 struct qstr *q;
@@ -180,6 +182,9 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
180 dlm_lockres_get(res); 182 dlm_lockres_get(res);
181 183
182 hlist_add_head(&res->hash_node, bucket); 184 hlist_add_head(&res->hash_node, bucket);
185
186 mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,
187 res->lockname.name);
183} 188}
184 189
185struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, 190struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
@@ -539,17 +544,17 @@ again:
539 544
540static void __dlm_print_nodes(struct dlm_ctxt *dlm) 545static void __dlm_print_nodes(struct dlm_ctxt *dlm)
541{ 546{
542 int node = -1; 547 int node = -1, num = 0;
543 548
544 assert_spin_locked(&dlm->spinlock); 549 assert_spin_locked(&dlm->spinlock);
545 550
546 printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name); 551 printk("( ");
547
548 while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 552 while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
549 node + 1)) < O2NM_MAX_NODES) { 553 node + 1)) < O2NM_MAX_NODES) {
550 printk("%d ", node); 554 printk("%d ", node);
555 ++num;
551 } 556 }
552 printk("\n"); 557 printk(") %u nodes\n", num);
553} 558}
554 559
555static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, 560static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -566,11 +571,10 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
566 571
567 node = exit_msg->node_idx; 572 node = exit_msg->node_idx;
568 573
569 printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name);
570
571 spin_lock(&dlm->spinlock); 574 spin_lock(&dlm->spinlock);
572 clear_bit(node, dlm->domain_map); 575 clear_bit(node, dlm->domain_map);
573 clear_bit(node, dlm->exit_domain_map); 576 clear_bit(node, dlm->exit_domain_map);
577 printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
574 __dlm_print_nodes(dlm); 578 __dlm_print_nodes(dlm);
575 579
576 /* notify anything attached to the heartbeat events */ 580 /* notify anything attached to the heartbeat events */
@@ -755,6 +759,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
755 759
756 dlm_mark_domain_leaving(dlm); 760 dlm_mark_domain_leaving(dlm);
757 dlm_leave_domain(dlm); 761 dlm_leave_domain(dlm);
762 printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name);
758 dlm_force_free_mles(dlm); 763 dlm_force_free_mles(dlm);
759 dlm_complete_dlm_shutdown(dlm); 764 dlm_complete_dlm_shutdown(dlm);
760 } 765 }
@@ -970,7 +975,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
970 clear_bit(assert->node_idx, dlm->exit_domain_map); 975 clear_bit(assert->node_idx, dlm->exit_domain_map);
971 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); 976 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
972 977
973 printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n", 978 printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ",
974 assert->node_idx, dlm->name); 979 assert->node_idx, dlm->name);
975 __dlm_print_nodes(dlm); 980 __dlm_print_nodes(dlm);
976 981
@@ -1701,8 +1706,10 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
1701bail: 1706bail:
1702 spin_lock(&dlm->spinlock); 1707 spin_lock(&dlm->spinlock);
1703 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); 1708 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
1704 if (!status) 1709 if (!status) {
1710 printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name);
1705 __dlm_print_nodes(dlm); 1711 __dlm_print_nodes(dlm);
1712 }
1706 spin_unlock(&dlm->spinlock); 1713 spin_unlock(&dlm->spinlock);
1707 1714
1708 if (ctxt) { 1715 if (ctxt) {
@@ -2131,13 +2138,6 @@ struct dlm_ctxt * dlm_register_domain(const char *domain,
2131 goto leave; 2138 goto leave;
2132 } 2139 }
2133 2140
2134 if (!o2hb_check_local_node_heartbeating()) {
2135 mlog(ML_ERROR, "the local node has not been configured, or is "
2136 "not heartbeating\n");
2137 ret = -EPROTO;
2138 goto leave;
2139 }
2140
2141 mlog(0, "register called for domain \"%s\"\n", domain); 2141 mlog(0, "register called for domain \"%s\"\n", domain);
2142 2142
2143retry: 2143retry:
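Editorial sketch between file sections: the dlmdomain.c hunks fold the join/leave/exit messages into a single line, with the caller printing the event prefix and __dlm_print_nodes() appending the membership as "( n1 n2 ... ) N nodes" by walking the domain bitmap. The userspace model below reproduces that output format with a plain bit loop in place of find_next_bit(); MAX_NODES and the sample bitmap are assumptions for the example.

/* Userspace model of the "( n1 n2 ... ) N nodes" output: walk a node bitmap,
 * print each set bit and keep a count. */
#include <stdio.h>

#define MAX_NODES 32

static void print_nodes(unsigned long domain_map)
{
        int node, num = 0;

        printf("( ");
        for (node = 0; node < MAX_NODES; node++) {
                if (domain_map & (1UL << node)) {
                        printf("%d ", node);
                        num++;
                }
        }
        printf(") %d nodes\n", num);
}

int main(void)
{
        /* nodes 0, 2 and 5 are in the domain */
        print_nodes((1UL << 0) | (1UL << 2) | (1UL << 5));
        return 0;
}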
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 8d39e0fd66f7..975810b98492 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -183,10 +183,6 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
183 kick_thread = 1; 183 kick_thread = 1;
184 } 184 }
185 } 185 }
186 /* reduce the inflight count, this may result in the lockres
187 * being purged below during calc_usage */
188 if (lock->ml.node == dlm->node_num)
189 dlm_lockres_drop_inflight_ref(dlm, res);
190 186
191 spin_unlock(&res->spinlock); 187 spin_unlock(&res->spinlock);
192 wake_up(&res->wq); 188 wake_up(&res->wq);
@@ -231,10 +227,16 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
231 lock->ml.type, res->lockname.len, 227 lock->ml.type, res->lockname.len,
232 res->lockname.name, flags); 228 res->lockname.name, flags);
233 229
230 /*
231 * Wait if resource is getting recovered, remastered, etc.
232 * If the resource was remastered and new owner is self, then exit.
233 */
234 spin_lock(&res->spinlock); 234 spin_lock(&res->spinlock);
235
236 /* will exit this call with spinlock held */
237 __dlm_wait_on_lockres(res); 235 __dlm_wait_on_lockres(res);
236 if (res->owner == dlm->node_num) {
237 spin_unlock(&res->spinlock);
238 return DLM_RECOVERING;
239 }
238 res->state |= DLM_LOCK_RES_IN_PROGRESS; 240 res->state |= DLM_LOCK_RES_IN_PROGRESS;
239 241
240 /* add lock to local (secondary) queue */ 242 /* add lock to local (secondary) queue */
@@ -319,27 +321,23 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
319 tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create, 321 tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
320 sizeof(create), res->owner, &status); 322 sizeof(create), res->owner, &status);
321 if (tmpret >= 0) { 323 if (tmpret >= 0) {
322 // successfully sent and received 324 ret = status;
323 ret = status; // this is already a dlm_status
324 if (ret == DLM_REJECTED) { 325 if (ret == DLM_REJECTED) {
325 mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres " 326 mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
326 "no longer owned by %u. that node is coming back " 327 "owned by node %u. That node is coming back up "
327 "up currently.\n", dlm->name, create.namelen, 328 "currently.\n", dlm->name, create.namelen,
328 create.name, res->owner); 329 create.name, res->owner);
329 dlm_print_one_lock_resource(res); 330 dlm_print_one_lock_resource(res);
330 BUG(); 331 BUG();
331 } 332 }
332 } else { 333 } else {
333 mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " 334 mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
334 "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key, 335 "node %u\n", dlm->name, create.namelen, create.name,
335 res->owner); 336 tmpret, res->owner);
336 if (dlm_is_host_down(tmpret)) { 337 if (dlm_is_host_down(tmpret))
337 ret = DLM_RECOVERING; 338 ret = DLM_RECOVERING;
338 mlog(0, "node %u died so returning DLM_RECOVERING " 339 else
339 "from lock message!\n", res->owner);
340 } else {
341 ret = dlm_err_to_dlm_status(tmpret); 340 ret = dlm_err_to_dlm_status(tmpret);
342 }
343 } 341 }
344 342
345 return ret; 343 return ret;
@@ -440,7 +438,7 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
440 /* zero memory only if kernel-allocated */ 438 /* zero memory only if kernel-allocated */
441 lksb = kzalloc(sizeof(*lksb), GFP_NOFS); 439 lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
442 if (!lksb) { 440 if (!lksb) {
443 kfree(lock); 441 kmem_cache_free(dlm_lock_cache, lock);
444 return NULL; 442 return NULL;
445 } 443 }
446 kernel_allocated = 1; 444 kernel_allocated = 1;
@@ -718,18 +716,10 @@ retry_lock:
718 716
719 if (status == DLM_RECOVERING || status == DLM_MIGRATING || 717 if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
720 status == DLM_FORWARD) { 718 status == DLM_FORWARD) {
721 mlog(0, "retrying lock with migration/"
722 "recovery/in progress\n");
723 msleep(100); 719 msleep(100);
724 /* no waiting for dlm_reco_thread */
725 if (recovery) { 720 if (recovery) {
726 if (status != DLM_RECOVERING) 721 if (status != DLM_RECOVERING)
727 goto retry_lock; 722 goto retry_lock;
728
729 mlog(0, "%s: got RECOVERING "
730 "for $RECOVERY lock, master "
731 "was %u\n", dlm->name,
732 res->owner);
733 /* wait to see the node go down, then 723 /* wait to see the node go down, then
734 * drop down and allow the lockres to 724 * drop down and allow the lockres to
735 * get cleaned up. need to remaster. */ 725 * get cleaned up. need to remaster. */
@@ -741,6 +731,14 @@ retry_lock:
741 } 731 }
742 } 732 }
743 733
734 /* Inflight taken in dlm_get_lock_resource() is dropped here */
735 spin_lock(&res->spinlock);
736 dlm_lockres_drop_inflight_ref(dlm, res);
737 spin_unlock(&res->spinlock);
738
739 dlm_lockres_calc_usage(dlm, res);
740 dlm_kick_thread(dlm, res);
741
744 if (status != DLM_NORMAL) { 742 if (status != DLM_NORMAL) {
745 lock->lksb->flags &= ~DLM_LKSB_GET_LVB; 743 lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
746 if (status != DLM_NOTQUEUED) 744 if (status != DLM_NOTQUEUED)
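Editorial sketch between file sections: in dlmlock.c the inflight reference taken by dlm_get_lock_resource() is now released in dlmlock() itself, under the resource spinlock, once the lock attempt has finished, and only then is the resource re-evaluated for purging. The sketch below models that pin/unpin idea in userspace with a pthread mutex and a counter; struct resource and the helper names are invented for the illustration and are not the o2dlm API. Build with: cc -pthread sketch.c

/* Userspace model: a resource is pinned while an operation is in flight and
 * unpinned when it completes, both under the resource lock, so a cleanup
 * pass can tell whether the resource may be purged. */
#include <pthread.h>
#include <stdio.h>

struct resource {
        pthread_mutex_t lock;
        int inflight;           /* operations still using the resource */
};

static struct resource res = { PTHREAD_MUTEX_INITIALIZER, 0 };

static void grab_inflight(struct resource *r)
{
        pthread_mutex_lock(&r->lock);
        r->inflight++;
        pthread_mutex_unlock(&r->lock);
}

static void drop_inflight(struct resource *r)
{
        pthread_mutex_lock(&r->lock);
        r->inflight--;
        pthread_mutex_unlock(&r->lock);
}

static int unused(struct resource *r)
{
        int idle;

        pthread_mutex_lock(&r->lock);
        idle = (r->inflight == 0);      /* cf. the inflight check added to __dlm_lockres_unused() */
        pthread_mutex_unlock(&r->lock);
        return idle;
}

int main(void)
{
        grab_inflight(&res);            /* lookup pins the resource */
        printf("purgeable while in use: %d\n", unused(&res));
        drop_inflight(&res);            /* lock attempt finished */
        printf("purgeable when idle:    %d\n", unused(&res));
        return 0;
}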
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 11eefb8c12e9..005261c333b0 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -631,39 +631,54 @@ error:
631 return NULL; 631 return NULL;
632} 632}
633 633
634void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, 634void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
635 struct dlm_lock_resource *res, 635 struct dlm_lock_resource *res, int bit)
636 int new_lockres,
637 const char *file,
638 int line)
639{ 636{
640 if (!new_lockres) 637 assert_spin_locked(&res->spinlock);
641 assert_spin_locked(&res->spinlock); 638
639 mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
640 res->lockname.name, bit, __builtin_return_address(0));
641
642 set_bit(bit, res->refmap);
643}
644
645void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
646 struct dlm_lock_resource *res, int bit)
647{
648 assert_spin_locked(&res->spinlock);
649
650 mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
651 res->lockname.name, bit, __builtin_return_address(0));
652
653 clear_bit(bit, res->refmap);
654}
655
656
657void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
658 struct dlm_lock_resource *res)
659{
660 assert_spin_locked(&res->spinlock);
642 661
643 if (!test_bit(dlm->node_num, res->refmap)) {
644 BUG_ON(res->inflight_locks != 0);
645 dlm_lockres_set_refmap_bit(dlm->node_num, res);
646 }
647 res->inflight_locks++; 662 res->inflight_locks++;
648 mlog(0, "%s:%.*s: inflight++: now %u\n", 663
649 dlm->name, res->lockname.len, res->lockname.name, 664 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
650 res->inflight_locks); 665 res->lockname.len, res->lockname.name, res->inflight_locks,
666 __builtin_return_address(0));
651} 667}
652 668
653void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, 669void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
654 struct dlm_lock_resource *res, 670 struct dlm_lock_resource *res)
655 const char *file,
656 int line)
657{ 671{
658 assert_spin_locked(&res->spinlock); 672 assert_spin_locked(&res->spinlock);
659 673
660 BUG_ON(res->inflight_locks == 0); 674 BUG_ON(res->inflight_locks == 0);
675
661 res->inflight_locks--; 676 res->inflight_locks--;
662 mlog(0, "%s:%.*s: inflight--: now %u\n", 677
663 dlm->name, res->lockname.len, res->lockname.name, 678 mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
664 res->inflight_locks); 679 res->lockname.len, res->lockname.name, res->inflight_locks,
665 if (res->inflight_locks == 0) 680 __builtin_return_address(0));
666 dlm_lockres_clear_refmap_bit(dlm->node_num, res); 681
667 wake_up(&res->wq); 682 wake_up(&res->wq);
668} 683}
669 684
@@ -697,7 +712,6 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
697 unsigned int hash; 712 unsigned int hash;
698 int tries = 0; 713 int tries = 0;
699 int bit, wait_on_recovery = 0; 714 int bit, wait_on_recovery = 0;
700 int drop_inflight_if_nonlocal = 0;
701 715
702 BUG_ON(!lockid); 716 BUG_ON(!lockid);
703 717
@@ -709,36 +723,33 @@ lookup:
709 spin_lock(&dlm->spinlock); 723 spin_lock(&dlm->spinlock);
710 tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); 724 tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
711 if (tmpres) { 725 if (tmpres) {
712 int dropping_ref = 0;
713
714 spin_unlock(&dlm->spinlock); 726 spin_unlock(&dlm->spinlock);
715
716 spin_lock(&tmpres->spinlock); 727 spin_lock(&tmpres->spinlock);
717 /* We wait for the other thread that is mastering the resource */ 728 /* Wait on the thread that is mastering the resource */
718 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 729 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
719 __dlm_wait_on_lockres(tmpres); 730 __dlm_wait_on_lockres(tmpres);
720 BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); 731 BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
732 spin_unlock(&tmpres->spinlock);
733 dlm_lockres_put(tmpres);
734 tmpres = NULL;
735 goto lookup;
721 } 736 }
722 737
723 if (tmpres->owner == dlm->node_num) { 738 /* Wait on the resource purge to complete before continuing */
724 BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF); 739 if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
725 dlm_lockres_grab_inflight_ref(dlm, tmpres); 740 BUG_ON(tmpres->owner == dlm->node_num);
726 } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) 741 __dlm_wait_on_lockres_flags(tmpres,
727 dropping_ref = 1; 742 DLM_LOCK_RES_DROPPING_REF);
728 spin_unlock(&tmpres->spinlock);
729
730 /* wait until done messaging the master, drop our ref to allow
731 * the lockres to be purged, start over. */
732 if (dropping_ref) {
733 spin_lock(&tmpres->spinlock);
734 __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
735 spin_unlock(&tmpres->spinlock); 743 spin_unlock(&tmpres->spinlock);
736 dlm_lockres_put(tmpres); 744 dlm_lockres_put(tmpres);
737 tmpres = NULL; 745 tmpres = NULL;
738 goto lookup; 746 goto lookup;
739 } 747 }
740 748
741 mlog(0, "found in hash!\n"); 749 /* Grab inflight ref to pin the resource */
750 dlm_lockres_grab_inflight_ref(dlm, tmpres);
751
752 spin_unlock(&tmpres->spinlock);
742 if (res) 753 if (res)
743 dlm_lockres_put(res); 754 dlm_lockres_put(res);
744 res = tmpres; 755 res = tmpres;
@@ -829,8 +840,8 @@ lookup:
829 * but they might own this lockres. wait on them. */ 840 * but they might own this lockres. wait on them. */
830 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 841 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
831 if (bit < O2NM_MAX_NODES) { 842 if (bit < O2NM_MAX_NODES) {
832 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " 843 mlog(0, "%s: res %.*s, At least one node (%d) "
833 "recover before lock mastery can begin\n", 844 "to recover before lock mastery can begin\n",
834 dlm->name, namelen, (char *)lockid, bit); 845 dlm->name, namelen, (char *)lockid, bit);
835 wait_on_recovery = 1; 846 wait_on_recovery = 1;
836 } 847 }
@@ -843,12 +854,11 @@ lookup:
843 854
844 /* finally add the lockres to its hash bucket */ 855 /* finally add the lockres to its hash bucket */
845 __dlm_insert_lockres(dlm, res); 856 __dlm_insert_lockres(dlm, res);
846 /* since this lockres is new it doesn't not require the spinlock */
847 dlm_lockres_grab_inflight_ref_new(dlm, res);
848 857
849 /* if this node does not become the master make sure to drop 858 /* Grab inflight ref to pin the resource */
850 * this inflight reference below */ 859 spin_lock(&res->spinlock);
851 drop_inflight_if_nonlocal = 1; 860 dlm_lockres_grab_inflight_ref(dlm, res);
861 spin_unlock(&res->spinlock);
852 862
853 /* get an extra ref on the mle in case this is a BLOCK 863 /* get an extra ref on the mle in case this is a BLOCK
854 * if so, the creator of the BLOCK may try to put the last 864 * if so, the creator of the BLOCK may try to put the last
@@ -864,8 +874,8 @@ redo_request:
864 * dlm spinlock would be detectable by a change on the mle, 874 * dlm spinlock would be detectable by a change on the mle,
865 * so we only need to clear out the recovery map once. */ 875 * so we only need to clear out the recovery map once. */
866 if (dlm_is_recovery_lock(lockid, namelen)) { 876 if (dlm_is_recovery_lock(lockid, namelen)) {
867 mlog(ML_NOTICE, "%s: recovery map is not empty, but " 877 mlog(0, "%s: Recovery map is not empty, but must "
868 "must master $RECOVERY lock now\n", dlm->name); 878 "master $RECOVERY lock now\n", dlm->name);
869 if (!dlm_pre_master_reco_lockres(dlm, res)) 879 if (!dlm_pre_master_reco_lockres(dlm, res))
870 wait_on_recovery = 0; 880 wait_on_recovery = 0;
871 else { 881 else {
@@ -883,8 +893,8 @@ redo_request:
883 spin_lock(&dlm->spinlock); 893 spin_lock(&dlm->spinlock);
884 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 894 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
885 if (bit < O2NM_MAX_NODES) { 895 if (bit < O2NM_MAX_NODES) {
886 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " 896 mlog(0, "%s: res %.*s, At least one node (%d) "
887 "recover before lock mastery can begin\n", 897 "to recover before lock mastery can begin\n",
888 dlm->name, namelen, (char *)lockid, bit); 898 dlm->name, namelen, (char *)lockid, bit);
889 wait_on_recovery = 1; 899 wait_on_recovery = 1;
890 } else 900 } else
@@ -913,8 +923,8 @@ redo_request:
913 * yet, keep going until it does. this is how the 923 * yet, keep going until it does. this is how the
914 * master will know that asserts are needed back to 924 * master will know that asserts are needed back to
915 * the lower nodes. */ 925 * the lower nodes. */
916 mlog(0, "%s:%.*s: requests only up to %u but master " 926 mlog(0, "%s: res %.*s, Requests only up to %u but "
917 "is %u, keep going\n", dlm->name, namelen, 927 "master is %u, keep going\n", dlm->name, namelen,
918 lockid, nodenum, mle->master); 928 lockid, nodenum, mle->master);
919 } 929 }
920 } 930 }
@@ -924,13 +934,12 @@ wait:
924 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); 934 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
925 if (ret < 0) { 935 if (ret < 0) {
926 wait_on_recovery = 1; 936 wait_on_recovery = 1;
927 mlog(0, "%s:%.*s: node map changed, redo the " 937 mlog(0, "%s: res %.*s, Node map changed, redo the master "
928 "master request now, blocked=%d\n", 938 "request now, blocked=%d\n", dlm->name, res->lockname.len,
929 dlm->name, res->lockname.len,
930 res->lockname.name, blocked); 939 res->lockname.name, blocked);
931 if (++tries > 20) { 940 if (++tries > 20) {
932 mlog(ML_ERROR, "%s:%.*s: spinning on " 941 mlog(ML_ERROR, "%s: res %.*s, Spinning on "
933 "dlm_wait_for_lock_mastery, blocked=%d\n", 942 "dlm_wait_for_lock_mastery, blocked = %d\n",
934 dlm->name, res->lockname.len, 943 dlm->name, res->lockname.len,
935 res->lockname.name, blocked); 944 res->lockname.name, blocked);
936 dlm_print_one_lock_resource(res); 945 dlm_print_one_lock_resource(res);
@@ -940,7 +949,8 @@ wait:
940 goto redo_request; 949 goto redo_request;
941 } 950 }
942 951
943 mlog(0, "lockres mastered by %u\n", res->owner); 952 mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
953 res->lockname.name, res->owner);
944 /* make sure we never continue without this */ 954 /* make sure we never continue without this */
945 BUG_ON(res->owner == O2NM_MAX_NODES); 955 BUG_ON(res->owner == O2NM_MAX_NODES);
946 956
@@ -952,8 +962,6 @@ wait:
952 962
953wake_waiters: 963wake_waiters:
954 spin_lock(&res->spinlock); 964 spin_lock(&res->spinlock);
955 if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
956 dlm_lockres_drop_inflight_ref(dlm, res);
957 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 965 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
958 spin_unlock(&res->spinlock); 966 spin_unlock(&res->spinlock);
959 wake_up(&res->wq); 967 wake_up(&res->wq);
@@ -1426,9 +1434,7 @@ way_up_top:
1426 } 1434 }
1427 1435
1428 if (res->owner == dlm->node_num) { 1436 if (res->owner == dlm->node_num) {
1429 mlog(0, "%s:%.*s: setting bit %u in refmap\n", 1437 dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
1430 dlm->name, namelen, name, request->node_idx);
1431 dlm_lockres_set_refmap_bit(request->node_idx, res);
1432 spin_unlock(&res->spinlock); 1438 spin_unlock(&res->spinlock);
1433 response = DLM_MASTER_RESP_YES; 1439 response = DLM_MASTER_RESP_YES;
1434 if (mle) 1440 if (mle)
@@ -1493,10 +1499,8 @@ way_up_top:
1493 * go back and clean the mles on any 1499 * go back and clean the mles on any
1494 * other nodes */ 1500 * other nodes */
1495 dispatch_assert = 1; 1501 dispatch_assert = 1;
1496 dlm_lockres_set_refmap_bit(request->node_idx, res); 1502 dlm_lockres_set_refmap_bit(dlm, res,
1497 mlog(0, "%s:%.*s: setting bit %u in refmap\n", 1503 request->node_idx);
1498 dlm->name, namelen, name,
1499 request->node_idx);
1500 } else 1504 } else
1501 response = DLM_MASTER_RESP_NO; 1505 response = DLM_MASTER_RESP_NO;
1502 } else { 1506 } else {
@@ -1702,7 +1706,7 @@ again:
1702 "lockres, set the bit in the refmap\n", 1706 "lockres, set the bit in the refmap\n",
1703 namelen, lockname, to); 1707 namelen, lockname, to);
1704 spin_lock(&res->spinlock); 1708 spin_lock(&res->spinlock);
1705 dlm_lockres_set_refmap_bit(to, res); 1709 dlm_lockres_set_refmap_bit(dlm, res, to);
1706 spin_unlock(&res->spinlock); 1710 spin_unlock(&res->spinlock);
1707 } 1711 }
1708 } 1712 }
@@ -2187,8 +2191,6 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2187 namelen = res->lockname.len; 2191 namelen = res->lockname.len;
2188 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 2192 BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2189 2193
2190 mlog(0, "%s:%.*s: sending deref to %d\n",
2191 dlm->name, namelen, lockname, res->owner);
2192 memset(&deref, 0, sizeof(deref)); 2194 memset(&deref, 0, sizeof(deref));
2193 deref.node_idx = dlm->node_num; 2195 deref.node_idx = dlm->node_num;
2194 deref.namelen = namelen; 2196 deref.namelen = namelen;
@@ -2197,14 +2199,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2197 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, 2199 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2198 &deref, sizeof(deref), res->owner, &r); 2200 &deref, sizeof(deref), res->owner, &r);
2199 if (ret < 0) 2201 if (ret < 0)
2200 mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " 2202 mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
2201 "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key, 2203 dlm->name, namelen, lockname, ret, res->owner);
2202 res->owner);
2203 else if (r < 0) { 2204 else if (r < 0) {
2204 /* BAD. other node says I did not have a ref. */ 2205 /* BAD. other node says I did not have a ref. */
2205 mlog(ML_ERROR,"while dropping ref on %s:%.*s " 2206 mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2206 "(master=%u) got %d.\n", dlm->name, namelen, 2207 dlm->name, namelen, lockname, res->owner, r);
2207 lockname, res->owner, r);
2208 dlm_print_one_lock_resource(res); 2208 dlm_print_one_lock_resource(res);
2209 BUG(); 2209 BUG();
2210 } 2210 }
@@ -2260,7 +2260,7 @@ int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2260 else { 2260 else {
2261 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2261 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2262 if (test_bit(node, res->refmap)) { 2262 if (test_bit(node, res->refmap)) {
2263 dlm_lockres_clear_refmap_bit(node, res); 2263 dlm_lockres_clear_refmap_bit(dlm, res, node);
2264 cleared = 1; 2264 cleared = 1;
2265 } 2265 }
2266 } 2266 }
@@ -2320,7 +2320,7 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2320 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2320 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2321 if (test_bit(node, res->refmap)) { 2321 if (test_bit(node, res->refmap)) {
2322 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); 2322 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2323 dlm_lockres_clear_refmap_bit(node, res); 2323 dlm_lockres_clear_refmap_bit(dlm, res, node);
2324 cleared = 1; 2324 cleared = 1;
2325 } 2325 }
2326 spin_unlock(&res->spinlock); 2326 spin_unlock(&res->spinlock);
@@ -2802,7 +2802,8 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2802 BUG_ON(!list_empty(&lock->bast_list)); 2802 BUG_ON(!list_empty(&lock->bast_list));
2803 BUG_ON(lock->ast_pending); 2803 BUG_ON(lock->ast_pending);
2804 BUG_ON(lock->bast_pending); 2804 BUG_ON(lock->bast_pending);
2805 dlm_lockres_clear_refmap_bit(lock->ml.node, res); 2805 dlm_lockres_clear_refmap_bit(dlm, res,
2806 lock->ml.node);
2806 list_del_init(&lock->list); 2807 list_del_init(&lock->list);
2807 dlm_lock_put(lock); 2808 dlm_lock_put(lock);
2808 /* In a normal unlock, we would have added a 2809 /* In a normal unlock, we would have added a
@@ -2823,7 +2824,7 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2823 mlog(0, "%s:%.*s: node %u had a ref to this " 2824 mlog(0, "%s:%.*s: node %u had a ref to this "
2824 "migrating lockres, clearing\n", dlm->name, 2825 "migrating lockres, clearing\n", dlm->name,
2825 res->lockname.len, res->lockname.name, bit); 2826 res->lockname.len, res->lockname.name, bit);
2826 dlm_lockres_clear_refmap_bit(bit, res); 2827 dlm_lockres_clear_refmap_bit(dlm, res, bit);
2827 } 2828 }
2828 bit++; 2829 bit++;
2829 } 2830 }
@@ -2916,9 +2917,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2916 &migrate, sizeof(migrate), nodenum, 2917 &migrate, sizeof(migrate), nodenum,
2917 &status); 2918 &status);
2918 if (ret < 0) { 2919 if (ret < 0) {
2919 mlog(ML_ERROR, "Error %d when sending message %u (key " 2920 mlog(ML_ERROR, "%s: res %.*s, Error %d send "
2920 "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG, 2921 "MIGRATE_REQUEST to node %u\n", dlm->name,
2921 dlm->key, nodenum); 2922 migrate.namelen, migrate.name, ret, nodenum);
2922 if (!dlm_is_host_down(ret)) { 2923 if (!dlm_is_host_down(ret)) {
2923 mlog(ML_ERROR, "unhandled error=%d!\n", ret); 2924 mlog(ML_ERROR, "unhandled error=%d!\n", ret);
2924 BUG(); 2925 BUG();
@@ -2937,7 +2938,7 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2937 dlm->name, res->lockname.len, res->lockname.name, 2938 dlm->name, res->lockname.len, res->lockname.name,
2938 nodenum); 2939 nodenum);
2939 spin_lock(&res->spinlock); 2940 spin_lock(&res->spinlock);
2940 dlm_lockres_set_refmap_bit(nodenum, res); 2941 dlm_lockres_set_refmap_bit(dlm, res, nodenum);
2941 spin_unlock(&res->spinlock); 2942 spin_unlock(&res->spinlock);
2942 } 2943 }
2943 } 2944 }
@@ -3271,7 +3272,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3271 * mastery reference here since old_master will briefly have 3272 * mastery reference here since old_master will briefly have
3272 * a reference after the migration completes */ 3273 * a reference after the migration completes */
3273 spin_lock(&res->spinlock); 3274 spin_lock(&res->spinlock);
3274 dlm_lockres_set_refmap_bit(old_master, res); 3275 dlm_lockres_set_refmap_bit(dlm, res, old_master);
3275 spin_unlock(&res->spinlock); 3276 spin_unlock(&res->spinlock);
3276 3277
3277 mlog(0, "now time to do a migrate request to other nodes\n"); 3278 mlog(0, "now time to do a migrate request to other nodes\n");
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 7efab6d28a21..01ebfd0bdad7 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -362,40 +362,38 @@ static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
362} 362}
363 363
364 364
365int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) 365void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
366{ 366{
367 if (timeout) { 367 if (dlm_is_node_dead(dlm, node))
368 mlog(ML_NOTICE, "%s: waiting %dms for notification of " 368 return;
369 "death of node %u\n", dlm->name, timeout, node); 369
370 printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
371 "domain %s\n", node, dlm->name);
372
373 if (timeout)
370 wait_event_timeout(dlm->dlm_reco_thread_wq, 374 wait_event_timeout(dlm->dlm_reco_thread_wq,
371 dlm_is_node_dead(dlm, node), 375 dlm_is_node_dead(dlm, node),
372 msecs_to_jiffies(timeout)); 376 msecs_to_jiffies(timeout));
373 } else { 377 else
374 mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
375 "of death of node %u\n", dlm->name, node);
376 wait_event(dlm->dlm_reco_thread_wq, 378 wait_event(dlm->dlm_reco_thread_wq,
377 dlm_is_node_dead(dlm, node)); 379 dlm_is_node_dead(dlm, node));
378 }
379 /* for now, return 0 */
380 return 0;
381} 380}
382 381
383int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) 382void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
384{ 383{
385 if (timeout) { 384 if (dlm_is_node_recovered(dlm, node))
386 mlog(0, "%s: waiting %dms for notification of " 385 return;
387 "recovery of node %u\n", dlm->name, timeout, node); 386
387 printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
388 "domain %s\n", node, dlm->name);
389
390 if (timeout)
388 wait_event_timeout(dlm->dlm_reco_thread_wq, 391 wait_event_timeout(dlm->dlm_reco_thread_wq,
389 dlm_is_node_recovered(dlm, node), 392 dlm_is_node_recovered(dlm, node),
390 msecs_to_jiffies(timeout)); 393 msecs_to_jiffies(timeout));
391 } else { 394 else
392 mlog(0, "%s: waiting indefinitely for notification "
393 "of recovery of node %u\n", dlm->name, node);
394 wait_event(dlm->dlm_reco_thread_wq, 395 wait_event(dlm->dlm_reco_thread_wq,
395 dlm_is_node_recovered(dlm, node)); 396 dlm_is_node_recovered(dlm, node));
396 }
397 /* for now, return 0 */
398 return 0;
399} 397}
400 398
401/* callers of the top-level api calls (dlmlock/dlmunlock) should 399/* callers of the top-level api calls (dlmlock/dlmunlock) should
@@ -430,6 +428,8 @@ static void dlm_begin_recovery(struct dlm_ctxt *dlm)
430{ 428{
431 spin_lock(&dlm->spinlock); 429 spin_lock(&dlm->spinlock);
432 BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); 430 BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
431 printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
432 dlm->name, dlm->reco.dead_node);
433 dlm->reco.state |= DLM_RECO_STATE_ACTIVE; 433 dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
434 spin_unlock(&dlm->spinlock); 434 spin_unlock(&dlm->spinlock);
435} 435}
@@ -440,9 +440,18 @@ static void dlm_end_recovery(struct dlm_ctxt *dlm)
440 BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); 440 BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
441 dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; 441 dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
442 spin_unlock(&dlm->spinlock); 442 spin_unlock(&dlm->spinlock);
443 printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
443 wake_up(&dlm->reco.event); 444 wake_up(&dlm->reco.event);
444} 445}
445 446
447static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
448{
449 printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
450 "dead node %u in domain %s\n", dlm->reco.new_master,
451 (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
452 dlm->reco.dead_node, dlm->name);
453}
454
446static int dlm_do_recovery(struct dlm_ctxt *dlm) 455static int dlm_do_recovery(struct dlm_ctxt *dlm)
447{ 456{
448 int status = 0; 457 int status = 0;
@@ -505,9 +514,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
505 } 514 }
506 mlog(0, "another node will master this recovery session.\n"); 515 mlog(0, "another node will master this recovery session.\n");
507 } 516 }
508 mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n", 517
509 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master, 518 dlm_print_recovery_master(dlm);
510 dlm->node_num, dlm->reco.dead_node);
511 519
512 /* it is safe to start everything back up here 520 /* it is safe to start everything back up here
513 * because all of the dead node's lock resources 521 * because all of the dead node's lock resources
@@ -518,15 +526,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
518 return 0; 526 return 0;
519 527
520master_here: 528master_here:
521 mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node " 529 dlm_print_recovery_master(dlm);
522 "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task),
523 dlm->node_num, dlm->reco.dead_node, dlm->name);
524 530
525 status = dlm_remaster_locks(dlm, dlm->reco.dead_node); 531 status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
526 if (status < 0) { 532 if (status < 0) {
527 /* we should never hit this anymore */ 533 /* we should never hit this anymore */
528 mlog(ML_ERROR, "error %d remastering locks for node %u, " 534 mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
529 "retrying.\n", status, dlm->reco.dead_node); 535 "retrying.\n", dlm->name, status, dlm->reco.dead_node);
530 /* yield a bit to allow any final network messages 536 /* yield a bit to allow any final network messages
531 * to get handled on remaining nodes */ 537 * to get handled on remaining nodes */
532 msleep(100); 538 msleep(100);
@@ -567,7 +573,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
567 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); 573 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
568 ndata->state = DLM_RECO_NODE_DATA_REQUESTING; 574 ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
569 575
570 mlog(0, "requesting lock info from node %u\n", 576 mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
571 ndata->node_num); 577 ndata->node_num);
572 578
573 if (ndata->node_num == dlm->node_num) { 579 if (ndata->node_num == dlm->node_num) {
@@ -640,7 +646,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
640 spin_unlock(&dlm_reco_state_lock); 646 spin_unlock(&dlm_reco_state_lock);
641 } 647 }
642 648
643 mlog(0, "done requesting all lock info\n"); 649 mlog(0, "%s: Done requesting all lock info\n", dlm->name);
644 650
645 /* nodes should be sending reco data now 651 /* nodes should be sending reco data now
646 * just need to wait */ 652 * just need to wait */
@@ -802,10 +808,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
802 808
803 /* negative status is handled by caller */ 809 /* negative status is handled by caller */
804 if (ret < 0) 810 if (ret < 0)
805 mlog(ML_ERROR, "Error %d when sending message %u (key " 811 mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
806 "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG, 812 "to recover dead node %u\n", dlm->name, ret,
807 dlm->key, request_from); 813 request_from, dead_node);
808
809 // return from here, then 814 // return from here, then
810 // sleep until all received or error 815 // sleep until all received or error
811 return ret; 816 return ret;
@@ -956,9 +961,9 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
956 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, 961 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
957 sizeof(done_msg), send_to, &tmpret); 962 sizeof(done_msg), send_to, &tmpret);
958 if (ret < 0) { 963 if (ret < 0) {
959 mlog(ML_ERROR, "Error %d when sending message %u (key " 964 mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
960 "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG, 965 "to recover dead node %u\n", dlm->name, ret, send_to,
961 dlm->key, send_to); 966 dead_node);
962 if (!dlm_is_host_down(ret)) { 967 if (!dlm_is_host_down(ret)) {
963 BUG(); 968 BUG();
964 } 969 }
@@ -1127,9 +1132,11 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
1127 if (ret < 0) { 1132 if (ret < 0) {
1128 /* XXX: negative status is not handled. 1133 /* XXX: negative status is not handled.
1129 * this will end up killing this node. */ 1134 * this will end up killing this node. */
1130 mlog(ML_ERROR, "Error %d when sending message %u (key " 1135 mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
1131 "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG, 1136 "node %u (%s)\n", dlm->name, mres->lockname_len,
1132 dlm->key, send_to); 1137 mres->lockname, ret, send_to,
1138 (orig_flags & DLM_MRES_MIGRATION ?
1139 "migration" : "recovery"));
1133 } else { 1140 } else {
1134 /* might get an -ENOMEM back here */ 1141 /* might get an -ENOMEM back here */
1135 ret = status; 1142 ret = status;
@@ -1767,7 +1774,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1767 dlm->name, mres->lockname_len, mres->lockname, 1774 dlm->name, mres->lockname_len, mres->lockname,
1768 from); 1775 from);
1769 spin_lock(&res->spinlock); 1776 spin_lock(&res->spinlock);
1770 dlm_lockres_set_refmap_bit(from, res); 1777 dlm_lockres_set_refmap_bit(dlm, res, from);
1771 spin_unlock(&res->spinlock); 1778 spin_unlock(&res->spinlock);
1772 added++; 1779 added++;
1773 break; 1780 break;
@@ -1965,7 +1972,7 @@ skip_lvb:
1965 mlog(0, "%s:%.*s: added lock for node %u, " 1972 mlog(0, "%s:%.*s: added lock for node %u, "
1966 "setting refmap bit\n", dlm->name, 1973 "setting refmap bit\n", dlm->name,
1967 res->lockname.len, res->lockname.name, ml->node); 1974 res->lockname.len, res->lockname.name, ml->node);
1968 dlm_lockres_set_refmap_bit(ml->node, res); 1975 dlm_lockres_set_refmap_bit(dlm, res, ml->node);
1969 added++; 1976 added++;
1970 } 1977 }
1971 spin_unlock(&res->spinlock); 1978 spin_unlock(&res->spinlock);
@@ -2084,6 +2091,9 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2084 2091
2085 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { 2092 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
2086 if (res->owner == dead_node) { 2093 if (res->owner == dead_node) {
2094 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2095 dlm->name, res->lockname.len, res->lockname.name,
2096 res->owner, new_master);
2087 list_del_init(&res->recovering); 2097 list_del_init(&res->recovering);
2088 spin_lock(&res->spinlock); 2098 spin_lock(&res->spinlock);
2089 /* new_master has our reference from 2099 /* new_master has our reference from
@@ -2105,40 +2115,30 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2105 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2115 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2106 bucket = dlm_lockres_hash(dlm, i); 2116 bucket = dlm_lockres_hash(dlm, i);
2107 hlist_for_each_entry(res, hash_iter, bucket, hash_node) { 2117 hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
2108 if (res->state & DLM_LOCK_RES_RECOVERING) { 2118 if (!(res->state & DLM_LOCK_RES_RECOVERING))
2109 if (res->owner == dead_node) { 2119 continue;
2110 mlog(0, "(this=%u) res %.*s owner=%u "
2111 "was not on recovering list, but "
2112 "clearing state anyway\n",
2113 dlm->node_num, res->lockname.len,
2114 res->lockname.name, new_master);
2115 } else if (res->owner == dlm->node_num) {
2116 mlog(0, "(this=%u) res %.*s owner=%u "
2117 "was not on recovering list, "
2118 "owner is THIS node, clearing\n",
2119 dlm->node_num, res->lockname.len,
2120 res->lockname.name, new_master);
2121 } else
2122 continue;
2123 2120
2124 if (!list_empty(&res->recovering)) { 2121 if (res->owner != dead_node &&
2125 mlog(0, "%s:%.*s: lockres was " 2122 res->owner != dlm->node_num)
2126 "marked RECOVERING, owner=%u\n", 2123 continue;
2127 dlm->name, res->lockname.len, 2124
2128 res->lockname.name, res->owner); 2125 if (!list_empty(&res->recovering)) {
2129 list_del_init(&res->recovering); 2126 list_del_init(&res->recovering);
2130 dlm_lockres_put(res); 2127 dlm_lockres_put(res);
2131 }
2132 spin_lock(&res->spinlock);
2133 /* new_master has our reference from
2134 * the lock state sent during recovery */
2135 dlm_change_lockres_owner(dlm, res, new_master);
2136 res->state &= ~DLM_LOCK_RES_RECOVERING;
2137 if (__dlm_lockres_has_locks(res))
2138 __dlm_dirty_lockres(dlm, res);
2139 spin_unlock(&res->spinlock);
2140 wake_up(&res->wq);
2141 } 2128 }
2129
2130 /* new_master has our reference from
2131 * the lock state sent during recovery */
2132 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2133 dlm->name, res->lockname.len, res->lockname.name,
2134 res->owner, new_master);
2135 spin_lock(&res->spinlock);
2136 dlm_change_lockres_owner(dlm, res, new_master);
2137 res->state &= ~DLM_LOCK_RES_RECOVERING;
2138 if (__dlm_lockres_has_locks(res))
2139 __dlm_dirty_lockres(dlm, res);
2140 spin_unlock(&res->spinlock);
2141 wake_up(&res->wq);
2142 } 2142 }
2143 } 2143 }
2144} 2144}
@@ -2252,12 +2252,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2252 res->lockname.len, res->lockname.name, freed, dead_node); 2252 res->lockname.len, res->lockname.name, freed, dead_node);
2253 __dlm_print_one_lock_resource(res); 2253 __dlm_print_one_lock_resource(res);
2254 } 2254 }
2255 dlm_lockres_clear_refmap_bit(dead_node, res); 2255 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2256 } else if (test_bit(dead_node, res->refmap)) { 2256 } else if (test_bit(dead_node, res->refmap)) {
2257 mlog(0, "%s:%.*s: dead node %u had a ref, but had " 2257 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2258 "no locks and had not purged before dying\n", dlm->name, 2258 "no locks and had not purged before dying\n", dlm->name,
2259 res->lockname.len, res->lockname.name, dead_node); 2259 res->lockname.len, res->lockname.name, dead_node);
2260 dlm_lockres_clear_refmap_bit(dead_node, res); 2260 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2261 } 2261 }
2262 2262
2263 /* do not kick thread yet */ 2263 /* do not kick thread yet */
@@ -2324,9 +2324,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2324 dlm_revalidate_lvb(dlm, res, dead_node); 2324 dlm_revalidate_lvb(dlm, res, dead_node);
2325 if (res->owner == dead_node) { 2325 if (res->owner == dead_node) {
2326 if (res->state & DLM_LOCK_RES_DROPPING_REF) { 2326 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
2327 mlog(ML_NOTICE, "Ignore %.*s for " 2327 mlog(ML_NOTICE, "%s: res %.*s, Skip "
2328 "recovery as it is being freed\n", 2328 "recovery as it is being freed\n",
2329 res->lockname.len, 2329 dlm->name, res->lockname.len,
2330 res->lockname.name); 2330 res->lockname.name);
2331 } else 2331 } else
2332 dlm_move_lockres_to_recovery_list(dlm, 2332 dlm_move_lockres_to_recovery_list(dlm,
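Editorial sketch between file sections: dlm_wait_for_node_death() and dlm_wait_for_node_recovery() now return void, bail out at once if the node is already dead or recovered, and otherwise wait with or without a timeout. The userspace sketch below mirrors that shape with a condition variable and pthread_cond_timedwait(); the flag, the helper name and the 100ms timeout are assumptions for the example. Build with: cc -pthread sketch.c

/* Userspace model: return immediately if the condition already holds,
 * otherwise wait for it, with an optional timeout. */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int node_dead;

static void wait_for_node_death(int timeout_ms)
{
        struct timespec ts;

        pthread_mutex_lock(&lock);
        if (node_dead) {                        /* already true: nothing to wait for */
                pthread_mutex_unlock(&lock);
                return;
        }
        if (timeout_ms) {
                clock_gettime(CLOCK_REALTIME, &ts);
                ts.tv_sec += timeout_ms / 1000;
                ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
                if (ts.tv_nsec >= 1000000000L) {
                        ts.tv_sec++;
                        ts.tv_nsec -= 1000000000L;
                }
                /* stop on timeout (non-zero return) or once the flag is set */
                while (!node_dead &&
                       pthread_cond_timedwait(&cond, &lock, &ts) == 0)
                        ;
        } else {
                while (!node_dead)
                        pthread_cond_wait(&cond, &lock);
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        wait_for_node_death(100);       /* nothing signals, so this times out */
        printf("done waiting\n");
        return 0;
}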
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 1d6d1d22c471..e73c833fc2a1 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -94,24 +94,26 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
94{ 94{
95 int bit; 95 int bit;
96 96
97 assert_spin_locked(&res->spinlock);
98
97 if (__dlm_lockres_has_locks(res)) 99 if (__dlm_lockres_has_locks(res))
98 return 0; 100 return 0;
99 101
102 /* Locks are in the process of being created */
103 if (res->inflight_locks)
104 return 0;
105
100 if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY) 106 if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
101 return 0; 107 return 0;
102 108
103 if (res->state & DLM_LOCK_RES_RECOVERING) 109 if (res->state & DLM_LOCK_RES_RECOVERING)
104 return 0; 110 return 0;
105 111
112 /* Another node has this resource with this node as the master */
106 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); 113 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
107 if (bit < O2NM_MAX_NODES) 114 if (bit < O2NM_MAX_NODES)
108 return 0; 115 return 0;
109 116
110 /*
111 * since the bit for dlm->node_num is not set, inflight_locks better
112 * be zero
113 */
114 BUG_ON(res->inflight_locks != 0);
115 return 1; 117 return 1;
116} 118}
117 119
@@ -185,8 +187,6 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
185 /* clear our bit from the master's refmap, ignore errors */ 187 /* clear our bit from the master's refmap, ignore errors */
186 ret = dlm_drop_lockres_ref(dlm, res); 188 ret = dlm_drop_lockres_ref(dlm, res);
187 if (ret < 0) { 189 if (ret < 0) {
188 mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
189 res->lockname.len, res->lockname.name, ret);
190 if (!dlm_is_host_down(ret)) 190 if (!dlm_is_host_down(ret))
191 BUG(); 191 BUG();
192 } 192 }
@@ -209,7 +209,7 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
209 BUG(); 209 BUG();
210 } 210 }
211 211
212 __dlm_unhash_lockres(res); 212 __dlm_unhash_lockres(dlm, res);
213 213
214 /* lockres is not in the hash now. drop the flag and wake up 214 /* lockres is not in the hash now. drop the flag and wake up
215 * any processes waiting in dlm_get_lock_resource. */ 215 * any processes waiting in dlm_get_lock_resource. */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e1ed5e502ff2..81a4cd22f80b 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1692,7 +1692,7 @@ int ocfs2_open_lock(struct inode *inode)
1692 mlog(0, "inode %llu take PRMODE open lock\n", 1692 mlog(0, "inode %llu take PRMODE open lock\n",
1693 (unsigned long long)OCFS2_I(inode)->ip_blkno); 1693 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1694 1694
1695 if (ocfs2_mount_local(osb)) 1695 if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
1696 goto out; 1696 goto out;
1697 1697
1698 lockres = &OCFS2_I(inode)->ip_open_lockres; 1698 lockres = &OCFS2_I(inode)->ip_open_lockres;
@@ -1718,6 +1718,12 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
1718 (unsigned long long)OCFS2_I(inode)->ip_blkno, 1718 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1719 write ? "EXMODE" : "PRMODE"); 1719 write ? "EXMODE" : "PRMODE");
1720 1720
1721 if (ocfs2_is_hard_readonly(osb)) {
1722 if (write)
1723 status = -EROFS;
1724 goto out;
1725 }
1726
1721 if (ocfs2_mount_local(osb)) 1727 if (ocfs2_mount_local(osb))
1722 goto out; 1728 goto out;
1723 1729
@@ -2298,7 +2304,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
2298 if (ocfs2_is_hard_readonly(osb)) { 2304 if (ocfs2_is_hard_readonly(osb)) {
2299 if (ex) 2305 if (ex)
2300 status = -EROFS; 2306 status = -EROFS;
2301 goto bail; 2307 goto getbh;
2302 } 2308 }
2303 2309
2304 if (ocfs2_mount_local(osb)) 2310 if (ocfs2_mount_local(osb))
@@ -2356,7 +2362,7 @@ local:
2356 mlog_errno(status); 2362 mlog_errno(status);
2357 goto bail; 2363 goto bail;
2358 } 2364 }
2359 2365getbh:
2360 if (ret_bh) { 2366 if (ret_bh) {
2361 status = ocfs2_assign_bh(inode, ret_bh, local_bh); 2367 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2362 if (status < 0) { 2368 if (status < 0) {
@@ -2628,8 +2634,11 @@ int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2628 2634
2629 BUG_ON(!dl); 2635 BUG_ON(!dl);
2630 2636
2631 if (ocfs2_is_hard_readonly(osb)) 2637 if (ocfs2_is_hard_readonly(osb)) {
2632 return -EROFS; 2638 if (ex)
2639 return -EROFS;
2640 return 0;
2641 }
2633 2642
2634 if (ocfs2_mount_local(osb)) 2643 if (ocfs2_mount_local(osb))
2635 return 0; 2644 return 0;
@@ -2647,7 +2656,7 @@ void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2647 struct ocfs2_dentry_lock *dl = dentry->d_fsdata; 2656 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2648 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); 2657 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2649 2658
2650 if (!ocfs2_mount_local(osb)) 2659 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
2651 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level); 2660 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2652} 2661}
2653 2662
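Editorial sketch between file sections: the dlmglue.c hunks converge on one policy for hard-readonly mounts: shared (read) cluster lock requests succeed as no-ops, exclusive (write) requests fail with -EROFS, and the matching unlock paths are skipped. A compressed model of that decision is sketched below; the function name and flags are invented for the illustration, not the ocfs2 interfaces.

/* Sketch of the read-only lock policy: on a hard-readonly mount, only
 * exclusive requests fail; shared requests silently succeed. */
#include <errno.h>
#include <stdio.h>

static int cluster_lock(int hard_readonly, int exclusive)
{
        if (hard_readonly)
                return exclusive ? -EROFS : 0;  /* read locks become no-ops */
        /* the normal DLM request would be issued here */
        return 0;
}

int main(void)
{
        printf("read-only + read lock  -> %d\n", cluster_lock(1, 0));
        printf("read-only + write lock -> %d\n", cluster_lock(1, 1));
        return 0;
}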
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 23457b491e8c..2f5b92ef0e53 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -832,6 +832,102 @@ out:
832 return ret; 832 return ret;
833} 833}
834 834
835int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin)
836{
837 struct inode *inode = file->f_mapping->host;
838 int ret;
839 unsigned int is_last = 0, is_data = 0;
840 u16 cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
841 u32 cpos, cend, clen, hole_size;
842 u64 extoff, extlen;
843 struct buffer_head *di_bh = NULL;
844 struct ocfs2_extent_rec rec;
845
846 BUG_ON(origin != SEEK_DATA && origin != SEEK_HOLE);
847
848 ret = ocfs2_inode_lock(inode, &di_bh, 0);
849 if (ret) {
850 mlog_errno(ret);
851 goto out;
852 }
853
854 down_read(&OCFS2_I(inode)->ip_alloc_sem);
855
856 if (*offset >= inode->i_size) {
857 ret = -ENXIO;
858 goto out_unlock;
859 }
860
861 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
862 if (origin == SEEK_HOLE)
863 *offset = inode->i_size;
864 goto out_unlock;
865 }
866
867 clen = 0;
868 cpos = *offset >> cs_bits;
869 cend = ocfs2_clusters_for_bytes(inode->i_sb, inode->i_size);
870
871 while (cpos < cend && !is_last) {
872 ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size,
873 &rec, &is_last);
874 if (ret) {
875 mlog_errno(ret);
876 goto out_unlock;
877 }
878
879 extoff = cpos;
880 extoff <<= cs_bits;
881
882 if (rec.e_blkno == 0ULL) {
883 clen = hole_size;
884 is_data = 0;
885 } else {
886 clen = le16_to_cpu(rec.e_leaf_clusters) -
887 (cpos - le32_to_cpu(rec.e_cpos));
888 is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ? 0 : 1;
889 }
890
891 if ((!is_data && origin == SEEK_HOLE) ||
892 (is_data && origin == SEEK_DATA)) {
893 if (extoff > *offset)
894 *offset = extoff;
895 goto out_unlock;
896 }
897
898 if (!is_last)
899 cpos += clen;
900 }
901
902 if (origin == SEEK_HOLE) {
903 extoff = cpos;
904 extoff <<= cs_bits;
905 extlen = clen;
906 extlen <<= cs_bits;
907
908 if ((extoff + extlen) > inode->i_size)
909 extlen = inode->i_size - extoff;
910 extoff += extlen;
911 if (extoff > *offset)
912 *offset = extoff;
913 goto out_unlock;
914 }
915
916 ret = -ENXIO;
917
918out_unlock:
919
920 brelse(di_bh);
921
922 up_read(&OCFS2_I(inode)->ip_alloc_sem);
923
924 ocfs2_inode_unlock(inode, 0);
925out:
926 if (ret && ret != -ENXIO)
927 ret = -ENXIO;
928 return ret;
929}
930
835int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, 931int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
836 struct buffer_head *bhs[], int flags, 932 struct buffer_head *bhs[], int flags,
837 int (*validate)(struct super_block *sb, 933 int (*validate)(struct super_block *sb,
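Editorial sketch between file sections: ocfs2_seek_data_hole_offset(), added above, is what lets lseek(2) with SEEK_DATA and SEEK_HOLE walk an ocfs2 file's extent map. From userspace the interface looks the same on any filesystem, so the short program below can demonstrate it anywhere; the path /etc/hostname is just a convenient existing file, and a return of -1 with errno set to ENXIO means the offset is at or past end of file.

/* Userspace demonstration of the interface this function backs: lseek(2)
 * with SEEK_DATA and SEEK_HOLE. Filesystems without native support typically
 * report the whole file as data with a single hole at EOF. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/etc/hostname", O_RDONLY);
        off_t data, hole;

        if (fd < 0) {
                perror("open");
                return 1;
        }
        data = lseek(fd, 0, SEEK_DATA); /* first data at or after offset 0 */
        hole = lseek(fd, 0, SEEK_HOLE); /* first hole, at latest EOF */
        printf("first data: %lld, first hole: %lld\n",
               (long long)data, (long long)hole);
        close(fd);
        return 0;
}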
diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h
index e79d41c2c909..67ea57d2fd59 100644
--- a/fs/ocfs2/extent_map.h
+++ b/fs/ocfs2/extent_map.h
@@ -53,6 +53,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
53int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 53int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
54 u64 map_start, u64 map_len); 54 u64 map_start, u64 map_len);
55 55
56int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin);
57
56int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, 58int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
57 u32 *p_cluster, u32 *num_clusters, 59 u32 *p_cluster, u32 *num_clusters,
58 struct ocfs2_extent_list *el, 60 struct ocfs2_extent_list *el,
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index de4ea1af041b..6e396683c3d4 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1950,6 +1950,9 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1950 if (ret < 0) 1950 if (ret < 0)
1951 mlog_errno(ret); 1951 mlog_errno(ret);
1952 1952
1953 if (file->f_flags & O_SYNC)
1954 handle->h_sync = 1;
1955
1953 ocfs2_commit_trans(osb, handle); 1956 ocfs2_commit_trans(osb, handle);
1954 1957
1955out_inode_unlock: 1958out_inode_unlock:
@@ -2052,6 +2055,23 @@ out:
2052 return ret; 2055 return ret;
2053} 2056}
2054 2057
2058static void ocfs2_aiodio_wait(struct inode *inode)
2059{
2060 wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
2061
2062 wait_event(*wq, (atomic_read(&OCFS2_I(inode)->ip_unaligned_aio) == 0));
2063}
2064
2065static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
2066{
2067 int blockmask = inode->i_sb->s_blocksize - 1;
2068 loff_t final_size = pos + count;
2069
2070 if ((pos & blockmask) || (final_size & blockmask))
2071 return 1;
2072 return 0;
2073}
2074
2055static int ocfs2_prepare_inode_for_refcount(struct inode *inode, 2075static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
2056 struct file *file, 2076 struct file *file,
2057 loff_t pos, size_t count, 2077 loff_t pos, size_t count,
@@ -2230,6 +2250,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
2230 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 2250 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2231 int full_coherency = !(osb->s_mount_opt & 2251 int full_coherency = !(osb->s_mount_opt &
2232 OCFS2_MOUNT_COHERENCY_BUFFERED); 2252 OCFS2_MOUNT_COHERENCY_BUFFERED);
2253 int unaligned_dio = 0;
2233 2254
2234 trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry, 2255 trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
2235 (unsigned long long)OCFS2_I(inode)->ip_blkno, 2256 (unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2297,6 +2318,10 @@ relock:
2297 goto out; 2318 goto out;
2298 } 2319 }
2299 2320
2321 if (direct_io && !is_sync_kiocb(iocb))
2322 unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_left,
2323 *ppos);
2324
2300 /* 2325 /*
2301 * We can't complete the direct I/O as requested, fall back to 2326 * We can't complete the direct I/O as requested, fall back to
2302 * buffered I/O. 2327 * buffered I/O.
@@ -2311,6 +2336,18 @@ relock:
2311 goto relock; 2336 goto relock;
2312 } 2337 }
2313 2338
2339 if (unaligned_dio) {
2340 /*
2341 * Wait on previous unaligned aio to complete before
2342 * proceeding.
2343 */
2344 ocfs2_aiodio_wait(inode);
2345
2346 /* Mark the iocb as needing a decrement in ocfs2_dio_end_io */
2347 atomic_inc(&OCFS2_I(inode)->ip_unaligned_aio);
2348 ocfs2_iocb_set_unaligned_aio(iocb);
2349 }
2350
2314 /* 2351 /*
2315 * To later detect whether a journal commit for sync writes is 2352 * To later detect whether a journal commit for sync writes is
2316 * necessary, we sample i_size, and cluster count here. 2353 * necessary, we sample i_size, and cluster count here.
@@ -2382,8 +2419,12 @@ out_dio:
2382 if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) { 2419 if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2383 rw_level = -1; 2420 rw_level = -1;
2384 have_alloc_sem = 0; 2421 have_alloc_sem = 0;
2422 unaligned_dio = 0;
2385 } 2423 }
2386 2424
2425 if (unaligned_dio)
2426 atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
2427
2387out: 2428out:
2388 if (rw_level != -1) 2429 if (rw_level != -1)
2389 ocfs2_rw_unlock(inode, rw_level); 2430 ocfs2_rw_unlock(inode, rw_level);
@@ -2591,6 +2632,57 @@ bail:
2591 return ret; 2632 return ret;
2592} 2633}
2593 2634
2635/* Refer generic_file_llseek_unlocked() */
2636static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int origin)
2637{
2638 struct inode *inode = file->f_mapping->host;
2639 int ret = 0;
2640
2641 mutex_lock(&inode->i_mutex);
2642
2643 switch (origin) {
2644 case SEEK_SET:
2645 break;
2646 case SEEK_END:
2647 offset += inode->i_size;
2648 break;
2649 case SEEK_CUR:
2650 if (offset == 0) {
2651 offset = file->f_pos;
2652 goto out;
2653 }
2654 offset += file->f_pos;
2655 break;
2656 case SEEK_DATA:
2657 case SEEK_HOLE:
2658 ret = ocfs2_seek_data_hole_offset(file, &offset, origin);
2659 if (ret)
2660 goto out;
2661 break;
2662 default:
2663 ret = -EINVAL;
2664 goto out;
2665 }
2666
2667 if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
2668 ret = -EINVAL;
2669 if (!ret && offset > inode->i_sb->s_maxbytes)
2670 ret = -EINVAL;
2671 if (ret)
2672 goto out;
2673
2674 if (offset != file->f_pos) {
2675 file->f_pos = offset;
2676 file->f_version = 0;
2677 }
2678
2679out:
2680 mutex_unlock(&inode->i_mutex);
2681 if (ret)
2682 return ret;
2683 return offset;
2684}
2685
2594const struct inode_operations ocfs2_file_iops = { 2686const struct inode_operations ocfs2_file_iops = {
2595 .setattr = ocfs2_setattr, 2687 .setattr = ocfs2_setattr,
2596 .getattr = ocfs2_getattr, 2688 .getattr = ocfs2_getattr,
@@ -2615,7 +2707,7 @@ const struct inode_operations ocfs2_special_file_iops = {
2615 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks! 2707 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2616 */ 2708 */
2617const struct file_operations ocfs2_fops = { 2709const struct file_operations ocfs2_fops = {
2618 .llseek = generic_file_llseek, 2710 .llseek = ocfs2_file_llseek,
2619 .read = do_sync_read, 2711 .read = do_sync_read,
2620 .write = do_sync_write, 2712 .write = do_sync_write,
2621 .mmap = ocfs2_mmap, 2713 .mmap = ocfs2_mmap,
@@ -2663,7 +2755,7 @@ const struct file_operations ocfs2_dops = {
2663 * the cluster. 2755 * the cluster.
2664 */ 2756 */
2665const struct file_operations ocfs2_fops_no_plocks = { 2757const struct file_operations ocfs2_fops_no_plocks = {
2666 .llseek = generic_file_llseek, 2758 .llseek = ocfs2_file_llseek,
2667 .read = do_sync_read, 2759 .read = do_sync_read,
2668 .write = do_sync_write, 2760 .write = do_sync_write,
2669 .mmap = ocfs2_mmap, 2761 .mmap = ocfs2_mmap,
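
The new ocfs2_file_llseek() above routes SEEK_DATA and SEEK_HOLE through ocfs2_seek_data_hole_offset(), so userspace can enumerate the allocated regions of a sparse file with plain lseek(2). A minimal sketch of such a caller, assuming a kernel carrying this patch; "testfile" is a placeholder path, not anything from the patch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* "testfile" is a placeholder; any sparse file on the mounted fs works */
        int fd = open("testfile", O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        off_t end = lseek(fd, 0, SEEK_END);
        off_t data = lseek(fd, 0, SEEK_DATA);   /* start of first data region */

        while (data >= 0 && data < end) {
                off_t hole = lseek(fd, data, SEEK_HOLE);        /* end of that region */
                if (hole < 0)
                        break;
                printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
                data = lseek(fd, hole, SEEK_DATA);      /* next region, or ENXIO at EOF */
        }

        close(fd);
        return 0;
}

On kernels or filesystems without SEEK_DATA/SEEK_HOLE support the SEEK_DATA call fails with EINVAL, which the loop condition above simply treats as "no data found".
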
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index a22d2c098890..17454a904d7b 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -951,7 +951,7 @@ static void ocfs2_cleanup_delete_inode(struct inode *inode,
951 trace_ocfs2_cleanup_delete_inode( 951 trace_ocfs2_cleanup_delete_inode(
952 (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); 952 (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
953 if (sync_data) 953 if (sync_data)
954 write_inode_now(inode, 1); 954 filemap_write_and_wait(inode->i_mapping);
955 truncate_inode_pages(&inode->i_data, 0); 955 truncate_inode_pages(&inode->i_data, 0);
956} 956}
957 957
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 1c508b149b3a..88924a3133fa 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -43,6 +43,9 @@ struct ocfs2_inode_info
43 /* protects extended attribute changes on this inode */ 43 /* protects extended attribute changes on this inode */
44 struct rw_semaphore ip_xattr_sem; 44 struct rw_semaphore ip_xattr_sem;
45 45
 46 /* Number of outstanding AIOs which are not page aligned */
47 atomic_t ip_unaligned_aio;
48
46 /* These fields are protected by ip_lock */ 49 /* These fields are protected by ip_lock */
47 spinlock_t ip_lock; 50 spinlock_t ip_lock;
48 u32 ip_open_count; 51 u32 ip_open_count;
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index bc91072b7219..726ff265b296 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -122,7 +122,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
122 if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) & 122 if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) &
123 (OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) { 123 (OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) {
124 if (!capable(CAP_LINUX_IMMUTABLE)) 124 if (!capable(CAP_LINUX_IMMUTABLE))
125 goto bail_unlock; 125 goto bail_commit;
126 } 126 }
127 127
128 ocfs2_inode->ip_attr = flags; 128 ocfs2_inode->ip_attr = flags;
@@ -132,6 +132,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
132 if (status < 0) 132 if (status < 0)
133 mlog_errno(status); 133 mlog_errno(status);
134 134
135bail_commit:
135 ocfs2_commit_trans(osb, handle); 136 ocfs2_commit_trans(osb, handle);
136bail_unlock: 137bail_unlock:
137 ocfs2_inode_unlock(inode, 1); 138 ocfs2_inode_unlock(inode, 1);
@@ -381,7 +382,7 @@ int ocfs2_info_handle_freeinode(struct inode *inode,
381 if (!oifi) { 382 if (!oifi) {
382 status = -ENOMEM; 383 status = -ENOMEM;
383 mlog_errno(status); 384 mlog_errno(status);
384 goto bail; 385 goto out_err;
385 } 386 }
386 387
387 if (o2info_from_user(*oifi, req)) 388 if (o2info_from_user(*oifi, req))
@@ -431,7 +432,7 @@ bail:
431 o2info_set_request_error(&oifi->ifi_req, req); 432 o2info_set_request_error(&oifi->ifi_req, req);
432 433
433 kfree(oifi); 434 kfree(oifi);
434 435out_err:
435 return status; 436 return status;
436} 437}
437 438
@@ -666,7 +667,7 @@ int ocfs2_info_handle_freefrag(struct inode *inode,
666 if (!oiff) { 667 if (!oiff) {
667 status = -ENOMEM; 668 status = -ENOMEM;
668 mlog_errno(status); 669 mlog_errno(status);
669 goto bail; 670 goto out_err;
670 } 671 }
671 672
672 if (o2info_from_user(*oiff, req)) 673 if (o2info_from_user(*oiff, req))
@@ -716,7 +717,7 @@ bail:
716 o2info_set_request_error(&oiff->iff_req, req); 717 o2info_set_request_error(&oiff->iff_req, req);
717 718
718 kfree(oiff); 719 kfree(oiff);
719 720out_err:
720 return status; 721 return status;
721} 722}
722 723
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 295d56454e8b..0a42ae96dca7 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1544,9 +1544,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
1544 /* we need to run complete recovery for offline orphan slots */ 1544 /* we need to run complete recovery for offline orphan slots */
1545 ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); 1545 ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
1546 1546
1547 mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n", 1547 printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\
1548 node_num, slot_num, 1548 "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
1549 MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); 1549 MINOR(osb->sb->s_dev));
1550 1550
1551 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); 1551 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
1552 1552
@@ -1601,6 +1601,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
1601 1601
1602 jbd2_journal_destroy(journal); 1602 jbd2_journal_destroy(journal);
1603 1603
1604 printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\
1605 "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
1606 MINOR(osb->sb->s_dev));
1604done: 1607done:
1605 /* drop the lock on this node's journal */ 1608 /* drop the lock on this node's journal */
1606 if (got_lock) 1609 if (got_lock)
@@ -1808,6 +1811,20 @@ static inline unsigned long ocfs2_orphan_scan_timeout(void)
1808 * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This 1811 * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
1809 * is done to catch any orphans that are left over in orphan directories. 1812 * is done to catch any orphans that are left over in orphan directories.
1810 * 1813 *
1814 * It scans all slots, even ones that are in use. It does so to handle the
1815 * case described below:
1816 *
1817 * Node 1 has an inode it was using. The dentry went away due to memory
1818 * pressure. Node 1 closes the inode, but it's on the free list. The node
1819 * has the open lock.
1820 * Node 2 unlinks the inode. It grabs the dentry lock to notify others,
1821 * but node 1 has no dentry and doesn't get the message. It trylocks the
1822 * open lock, sees that another node has a PR, and does nothing.
1823 * Later node 2 runs its orphan dir. It igets the inode, trylocks the
1824 * open lock, sees the PR still, and does nothing.
1825 * Basically, we have to trigger an orphan iput on node 1. The only way
1826 * for this to happen is if node 1 runs node 2's orphan dir.
1827 *
1811 * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT 1828 * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
1812 * seconds. It gets an EX lock on os_lockres and checks sequence number 1829 * seconds. It gets an EX lock on os_lockres and checks sequence number
1813 * stored in LVB. If the sequence number has changed, it means some other 1830 * stored in LVB. If the sequence number has changed, it means some other
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 68cf2f6d3c6a..a3385b63ff5e 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -441,10 +441,11 @@ static inline int ocfs2_mknod_credits(struct super_block *sb, int is_dir,
441#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2) 441#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2)
442 442
443/* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota 443/* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota
444 * update on dir + index leaf + dx root update for free list */ 444 * update on dir + index leaf + dx root update for free list +
445 * previous dirblock update in the free list */
445static inline int ocfs2_link_credits(struct super_block *sb) 446static inline int ocfs2_link_credits(struct super_block *sb)
446{ 447{
447 return 2*OCFS2_INODE_UPDATE_CREDITS + 3 + 448 return 2*OCFS2_INODE_UPDATE_CREDITS + 4 +
448 ocfs2_quota_trans_credits(sb); 449 ocfs2_quota_trans_credits(sb);
449} 450}
450 451
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 3e9393ca39eb..9cd41083e991 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -61,7 +61,7 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
61static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, 61static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
62 struct page *page) 62 struct page *page)
63{ 63{
64 int ret; 64 int ret = VM_FAULT_NOPAGE;
65 struct inode *inode = file->f_path.dentry->d_inode; 65 struct inode *inode = file->f_path.dentry->d_inode;
66 struct address_space *mapping = inode->i_mapping; 66 struct address_space *mapping = inode->i_mapping;
67 loff_t pos = page_offset(page); 67 loff_t pos = page_offset(page);
@@ -71,32 +71,25 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
71 void *fsdata; 71 void *fsdata;
72 loff_t size = i_size_read(inode); 72 loff_t size = i_size_read(inode);
73 73
74 /*
75 * Another node might have truncated while we were waiting on
76 * cluster locks.
77 * We don't check size == 0 before the shift. This is borrowed
78 * from do_generic_file_read.
79 */
80 last_index = (size - 1) >> PAGE_CACHE_SHIFT; 74 last_index = (size - 1) >> PAGE_CACHE_SHIFT;
81 if (unlikely(!size || page->index > last_index)) {
82 ret = -EINVAL;
83 goto out;
84 }
85 75
86 /* 76 /*
 87 * The i_size check above doesn't catch the case where nodes 77 * There are cases that lead to the page no longer belonging to the
 88 * truncated and then re-extended the file. We'll re-check the 78 * mapping.
 89 * page mapping after taking the page lock inside of 79 * 1) pagecache truncates locally due to memory pressure.
 90 * ocfs2_write_begin_nolock(). 80 * 2) pagecache truncates when another node is taking EX lock against
 81 * inode lock. See ocfs2_data_convert_worker.
82 *
83 * The i_size check doesn't catch the case where nodes truncated and
84 * then re-extended the file. We'll re-check the page mapping after
85 * taking the page lock inside of ocfs2_write_begin_nolock().
86 *
87 * Let VM retry with these cases.
91 */ 88 */
92 if (!PageUptodate(page) || page->mapping != inode->i_mapping) { 89 if ((page->mapping != inode->i_mapping) ||
93 /* 90 (!PageUptodate(page)) ||
94 * the page has been umapped in ocfs2_data_downconvert_worker. 91 (page_offset(page) >= size))
95 * So return 0 here and let VFS retry.
96 */
97 ret = 0;
98 goto out; 92 goto out;
99 }
100 93
101 /* 94 /*
102 * Call ocfs2_write_begin() and ocfs2_write_end() to take 95 * Call ocfs2_write_begin() and ocfs2_write_end() to take
@@ -116,17 +109,21 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
116 if (ret) { 109 if (ret) {
117 if (ret != -ENOSPC) 110 if (ret != -ENOSPC)
118 mlog_errno(ret); 111 mlog_errno(ret);
112 if (ret == -ENOMEM)
113 ret = VM_FAULT_OOM;
114 else
115 ret = VM_FAULT_SIGBUS;
119 goto out; 116 goto out;
120 } 117 }
121 118
122 ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page, 119 if (!locked_page) {
123 fsdata); 120 ret = VM_FAULT_NOPAGE;
124 if (ret < 0) {
125 mlog_errno(ret);
126 goto out; 121 goto out;
127 } 122 }
123 ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
124 fsdata);
128 BUG_ON(ret != len); 125 BUG_ON(ret != len);
129 ret = 0; 126 ret = VM_FAULT_LOCKED;
130out: 127out:
131 return ret; 128 return ret;
132} 129}
@@ -168,8 +165,6 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
168 165
169out: 166out:
170 ocfs2_unblock_signals(&oldset); 167 ocfs2_unblock_signals(&oldset);
171 if (ret)
172 ret = VM_FAULT_SIGBUS;
173 return ret; 168 return ret;
174} 169}
175 170
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index d53cb706f14c..184c76b8c293 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -745,7 +745,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
745 */ 745 */
746 ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop, 746 ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
747 new_phys_cpos); 747 new_phys_cpos);
748 if (!new_phys_cpos) { 748 if (!*new_phys_cpos) {
749 ret = -ENOSPC; 749 ret = -ENOSPC;
750 goto out_commit; 750 goto out_commit;
751 } 751 }
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 409285854f64..d355e6e36b36 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -836,18 +836,65 @@ static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb,
836 836
837static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap) 837static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
838{ 838{
839 __test_and_set_bit_le(bit, bitmap); 839 __set_bit_le(bit, bitmap);
840} 840}
841#define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr)) 841#define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr))
842 842
843static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap) 843static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap)
844{ 844{
845 __test_and_clear_bit_le(bit, bitmap); 845 __clear_bit_le(bit, bitmap);
846} 846}
847#define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr)) 847#define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr))
848 848
849#define ocfs2_test_bit test_bit_le 849#define ocfs2_test_bit test_bit_le
850#define ocfs2_find_next_zero_bit find_next_zero_bit_le 850#define ocfs2_find_next_zero_bit find_next_zero_bit_le
851#define ocfs2_find_next_bit find_next_bit_le 851#define ocfs2_find_next_bit find_next_bit_le
852
853static inline void *correct_addr_and_bit_unaligned(int *bit, void *addr)
854{
855#if BITS_PER_LONG == 64
856 *bit += ((unsigned long) addr & 7UL) << 3;
857 addr = (void *) ((unsigned long) addr & ~7UL);
858#elif BITS_PER_LONG == 32
859 *bit += ((unsigned long) addr & 3UL) << 3;
860 addr = (void *) ((unsigned long) addr & ~3UL);
861#else
 862#error "how many bits are you?!"
863#endif
864 return addr;
865}
866
867static inline void ocfs2_set_bit_unaligned(int bit, void *bitmap)
868{
869 bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
870 ocfs2_set_bit(bit, bitmap);
871}
872
873static inline void ocfs2_clear_bit_unaligned(int bit, void *bitmap)
874{
875 bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
876 ocfs2_clear_bit(bit, bitmap);
877}
878
879static inline int ocfs2_test_bit_unaligned(int bit, void *bitmap)
880{
881 bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
882 return ocfs2_test_bit(bit, bitmap);
883}
884
885static inline int ocfs2_find_next_zero_bit_unaligned(void *bitmap, int max,
886 int start)
887{
888 int fix = 0, ret, tmpmax;
889 bitmap = correct_addr_and_bit_unaligned(&fix, bitmap);
890 tmpmax = max + fix;
891 start += fix;
892
893 ret = ocfs2_find_next_zero_bit(bitmap, tmpmax, start) - fix;
894 if (ret > max)
895 return max;
896 return ret;
897}
898
852#endif /* OCFS2_H */ 899#endif /* OCFS2_H */
853 900
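
The *_unaligned helpers above exist because the on-disk quota bitmap used in quota_local.c is not guaranteed to start on a long boundary; correct_addr_and_bit_unaligned() rounds the pointer down to a long boundary and folds the dropped byte offset into the bit index. A standalone sketch of that arithmetic for the 64-bit case, with made-up buffer and bit values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t words[4] = { 0 };                      /* 8-byte aligned backing store */
        void *addr = (unsigned char *)words + 5;        /* bitmap starts 5 bytes in: unaligned */
        int bit = 10;                                   /* bit 10 of the unaligned bitmap */

        /* same fix-up as correct_addr_and_bit_unaligned() when BITS_PER_LONG == 64 */
        bit += ((uintptr_t)addr & 7UL) << 3;            /* 5 bytes -> 40 extra bits */
        addr = (void *)((uintptr_t)addr & ~7UL);        /* back to the aligned word */

        printf("adjusted bit %d at offset %td from the aligned base\n",
               bit, (unsigned char *)addr - (unsigned char *)words);
        return 0;
}

Bit 10 of a bitmap that begins 5 bytes into a word is the same storage as bit 50 of the aligned word, which is what the set/clear/test/find wrappers then operate on.
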
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index dc8007fc9247..f100bf70a906 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -404,7 +404,9 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
404 int status = 0; 404 int status = 0;
405 struct ocfs2_quota_recovery *rec; 405 struct ocfs2_quota_recovery *rec;
406 406
407 mlog(ML_NOTICE, "Beginning quota recovery in slot %u\n", slot_num); 407 printk(KERN_NOTICE "ocfs2: Beginning quota recovery on device (%s) for "
408 "slot %u\n", osb->dev_str, slot_num);
409
408 rec = ocfs2_alloc_quota_recovery(); 410 rec = ocfs2_alloc_quota_recovery();
409 if (!rec) 411 if (!rec)
410 return ERR_PTR(-ENOMEM); 412 return ERR_PTR(-ENOMEM);
@@ -549,8 +551,8 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
549 goto out_commit; 551 goto out_commit;
550 } 552 }
551 lock_buffer(qbh); 553 lock_buffer(qbh);
552 WARN_ON(!ocfs2_test_bit(bit, dchunk->dqc_bitmap)); 554 WARN_ON(!ocfs2_test_bit_unaligned(bit, dchunk->dqc_bitmap));
553 ocfs2_clear_bit(bit, dchunk->dqc_bitmap); 555 ocfs2_clear_bit_unaligned(bit, dchunk->dqc_bitmap);
554 le32_add_cpu(&dchunk->dqc_free, 1); 556 le32_add_cpu(&dchunk->dqc_free, 1);
555 unlock_buffer(qbh); 557 unlock_buffer(qbh);
556 ocfs2_journal_dirty(handle, qbh); 558 ocfs2_journal_dirty(handle, qbh);
@@ -596,7 +598,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
596 struct inode *lqinode; 598 struct inode *lqinode;
597 unsigned int flags; 599 unsigned int flags;
598 600
599 mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num); 601 printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
602 "slot %u\n", osb->dev_str, slot_num);
603
600 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); 604 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
601 for (type = 0; type < MAXQUOTAS; type++) { 605 for (type = 0; type < MAXQUOTAS; type++) {
602 if (list_empty(&(rec->r_list[type]))) 606 if (list_empty(&(rec->r_list[type])))
@@ -612,8 +616,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
612 /* Someone else is holding the lock? Then he must be 616 /* Someone else is holding the lock? Then he must be
613 * doing the recovery. Just skip the file... */ 617 * doing the recovery. Just skip the file... */
614 if (status == -EAGAIN) { 618 if (status == -EAGAIN) {
615 mlog(ML_NOTICE, "skipping quota recovery for slot %d " 619 printk(KERN_NOTICE "ocfs2: Skipping quota recovery on "
616 "because quota file is locked.\n", slot_num); 620 "device (%s) for slot %d because quota file is "
621 "locked.\n", osb->dev_str, slot_num);
617 status = 0; 622 status = 0;
618 goto out_put; 623 goto out_put;
619 } else if (status < 0) { 624 } else if (status < 0) {
@@ -944,7 +949,7 @@ static struct ocfs2_quota_chunk *ocfs2_find_free_entry(struct super_block *sb,
944 * ol_quota_entries_per_block(sb); 949 * ol_quota_entries_per_block(sb);
945 } 950 }
946 951
947 found = ocfs2_find_next_zero_bit(dchunk->dqc_bitmap, len, 0); 952 found = ocfs2_find_next_zero_bit_unaligned(dchunk->dqc_bitmap, len, 0);
948 /* We failed? */ 953 /* We failed? */
949 if (found == len) { 954 if (found == len) {
950 mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u" 955 mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u"
@@ -1208,7 +1213,7 @@ static void olq_alloc_dquot(struct buffer_head *bh, void *private)
1208 struct ocfs2_local_disk_chunk *dchunk; 1213 struct ocfs2_local_disk_chunk *dchunk;
1209 1214
1210 dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data; 1215 dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data;
1211 ocfs2_set_bit(*offset, dchunk->dqc_bitmap); 1216 ocfs2_set_bit_unaligned(*offset, dchunk->dqc_bitmap);
1212 le32_add_cpu(&dchunk->dqc_free, -1); 1217 le32_add_cpu(&dchunk->dqc_free, -1);
1213} 1218}
1214 1219
@@ -1289,7 +1294,7 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
1289 (od->dq_chunk->qc_headerbh->b_data); 1294 (od->dq_chunk->qc_headerbh->b_data);
1290 /* Mark structure as freed */ 1295 /* Mark structure as freed */
1291 lock_buffer(od->dq_chunk->qc_headerbh); 1296 lock_buffer(od->dq_chunk->qc_headerbh);
1292 ocfs2_clear_bit(offset, dchunk->dqc_bitmap); 1297 ocfs2_clear_bit_unaligned(offset, dchunk->dqc_bitmap);
1293 le32_add_cpu(&dchunk->dqc_free, 1); 1298 le32_add_cpu(&dchunk->dqc_free, 1);
1294 unlock_buffer(od->dq_chunk->qc_headerbh); 1299 unlock_buffer(od->dq_chunk->qc_headerbh);
1295 ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh); 1300 ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index 26fc0014d509..1424c151cccc 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -493,8 +493,8 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
493 goto bail; 493 goto bail;
494 } 494 }
495 } else 495 } else
496 mlog(ML_NOTICE, "slot %d is already allocated to this node!\n", 496 printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
497 slot); 497 "allocated to this node!\n", slot, osb->dev_str);
498 498
499 ocfs2_set_slot(si, slot, osb->node_num); 499 ocfs2_set_slot(si, slot, osb->node_num);
500 osb->slot_num = slot; 500 osb->slot_num = slot;
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index 19965b00c43c..94368017edb3 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -28,6 +28,7 @@
28#include "cluster/masklog.h" 28#include "cluster/masklog.h"
29#include "cluster/nodemanager.h" 29#include "cluster/nodemanager.h"
30#include "cluster/heartbeat.h" 30#include "cluster/heartbeat.h"
31#include "cluster/tcp.h"
31 32
32#include "stackglue.h" 33#include "stackglue.h"
33 34
@@ -256,6 +257,61 @@ static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb)
256} 257}
257 258
258/* 259/*
260 * Check if this node is heartbeating and is connected to all other
261 * heartbeating nodes.
262 */
263static int o2cb_cluster_check(void)
264{
265 u8 node_num;
266 int i;
267 unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
268 unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
269
270 node_num = o2nm_this_node();
271 if (node_num == O2NM_MAX_NODES) {
272 printk(KERN_ERR "o2cb: This node has not been configured.\n");
273 return -EINVAL;
274 }
275
276 /*
277 * o2dlm expects o2net sockets to be created. If not, then
278 * dlm_join_domain() fails with a stack of errors which are both cryptic
279 * and incomplete. The idea here is to detect upfront whether we have
280 * managed to connect to all nodes or not. If not, then list the nodes
281 * to allow the user to check the configuration (incorrect IP, firewall,
 282 * etc.) Yes, this is racy. But it's not the end of the world.
283 */
284#define O2CB_MAP_STABILIZE_COUNT 60
285 for (i = 0; i < O2CB_MAP_STABILIZE_COUNT; ++i) {
286 o2hb_fill_node_map(hbmap, sizeof(hbmap));
287 if (!test_bit(node_num, hbmap)) {
288 printk(KERN_ERR "o2cb: %s heartbeat has not been "
289 "started.\n", (o2hb_global_heartbeat_active() ?
290 "Global" : "Local"));
291 return -EINVAL;
292 }
293 o2net_fill_node_map(netmap, sizeof(netmap));
294 /* Force set the current node to allow easy compare */
295 set_bit(node_num, netmap);
296 if (!memcmp(hbmap, netmap, sizeof(hbmap)))
297 return 0;
298 if (i < O2CB_MAP_STABILIZE_COUNT)
299 msleep(1000);
300 }
301
302 printk(KERN_ERR "o2cb: This node could not connect to nodes:");
303 i = -1;
304 while ((i = find_next_bit(hbmap, O2NM_MAX_NODES,
305 i + 1)) < O2NM_MAX_NODES) {
306 if (!test_bit(i, netmap))
307 printk(" %u", i);
308 }
309 printk(".\n");
310
311 return -ENOTCONN;
312}
313
314/*
259 * Called from the dlm when it's about to evict a node. This is how the 315 * Called from the dlm when it's about to evict a node. This is how the
260 * classic stack signals node death. 316 * classic stack signals node death.
261 */ 317 */
@@ -263,8 +319,8 @@ static void o2dlm_eviction_cb(int node_num, void *data)
263{ 319{
264 struct ocfs2_cluster_connection *conn = data; 320 struct ocfs2_cluster_connection *conn = data;
265 321
266 mlog(ML_NOTICE, "o2dlm has evicted node %d from group %.*s\n", 322 printk(KERN_NOTICE "o2cb: o2dlm has evicted node %d from domain %.*s\n",
267 node_num, conn->cc_namelen, conn->cc_name); 323 node_num, conn->cc_namelen, conn->cc_name);
268 324
269 conn->cc_recovery_handler(node_num, conn->cc_recovery_data); 325 conn->cc_recovery_handler(node_num, conn->cc_recovery_data);
270} 326}
@@ -280,12 +336,11 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
280 BUG_ON(conn == NULL); 336 BUG_ON(conn == NULL);
281 BUG_ON(conn->cc_proto == NULL); 337 BUG_ON(conn->cc_proto == NULL);
282 338
283 /* for now we only have one cluster/node, make sure we see it 339 /* Ensure cluster stack is up and all nodes are connected */
284 * in the heartbeat universe */ 340 rc = o2cb_cluster_check();
285 if (!o2hb_check_local_node_heartbeating()) { 341 if (rc) {
286 if (o2hb_global_heartbeat_active()) 342 printk(KERN_ERR "o2cb: Cluster check failed. Fix errors "
287 mlog(ML_ERROR, "Global heartbeat not started\n"); 343 "before retrying.\n");
288 rc = -EINVAL;
289 goto out; 344 goto out;
290 } 345 }
291 346
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 56f61027236b..4994f8b0e604 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -54,6 +54,7 @@
54#include "ocfs1_fs_compat.h" 54#include "ocfs1_fs_compat.h"
55 55
56#include "alloc.h" 56#include "alloc.h"
57#include "aops.h"
57#include "blockcheck.h" 58#include "blockcheck.h"
58#include "dlmglue.h" 59#include "dlmglue.h"
59#include "export.h" 60#include "export.h"
@@ -1107,9 +1108,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1107 1108
1108 ocfs2_set_ro_flag(osb, 1); 1109 ocfs2_set_ro_flag(osb, 1);
1109 1110
1110 printk(KERN_NOTICE "Readonly device detected. No cluster " 1111 printk(KERN_NOTICE "ocfs2: Readonly device (%s) detected. "
1111 "services will be utilized for this mount. Recovery " 1112 "Cluster services will not be used for this mount. "
1112 "will be skipped.\n"); 1113 "Recovery will be skipped.\n", osb->dev_str);
1113 } 1114 }
1114 1115
1115 if (!ocfs2_is_hard_readonly(osb)) { 1116 if (!ocfs2_is_hard_readonly(osb)) {
@@ -1616,12 +1617,17 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1616 return 0; 1617 return 0;
1617} 1618}
1618 1619
1620wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
1621
1619static int __init ocfs2_init(void) 1622static int __init ocfs2_init(void)
1620{ 1623{
1621 int status; 1624 int status, i;
1622 1625
1623 ocfs2_print_version(); 1626 ocfs2_print_version();
1624 1627
1628 for (i = 0; i < OCFS2_IOEND_WQ_HASH_SZ; i++)
1629 init_waitqueue_head(&ocfs2__ioend_wq[i]);
1630
1625 status = init_ocfs2_uptodate_cache(); 1631 status = init_ocfs2_uptodate_cache();
1626 if (status < 0) { 1632 if (status < 0) {
1627 mlog_errno(status); 1633 mlog_errno(status);
@@ -1760,7 +1766,7 @@ static void ocfs2_inode_init_once(void *data)
1760 ocfs2_extent_map_init(&oi->vfs_inode); 1766 ocfs2_extent_map_init(&oi->vfs_inode);
1761 INIT_LIST_HEAD(&oi->ip_io_markers); 1767 INIT_LIST_HEAD(&oi->ip_io_markers);
1762 oi->ip_dir_start_lookup = 0; 1768 oi->ip_dir_start_lookup = 0;
1763 1769 atomic_set(&oi->ip_unaligned_aio, 0);
1764 init_rwsem(&oi->ip_alloc_sem); 1770 init_rwsem(&oi->ip_alloc_sem);
1765 init_rwsem(&oi->ip_xattr_sem); 1771 init_rwsem(&oi->ip_xattr_sem);
1766 mutex_init(&oi->ip_io_mutex); 1772 mutex_init(&oi->ip_io_mutex);
@@ -1974,7 +1980,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1974 * If we failed before we got a uuid_str yet, we can't stop 1980 * If we failed before we got a uuid_str yet, we can't stop
1975 * heartbeat. Otherwise, do it. 1981 * heartbeat. Otherwise, do it.
1976 */ 1982 */
1977 if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str) 1983 if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str &&
1984 !ocfs2_is_hard_readonly(osb))
1978 hangup_needed = 1; 1985 hangup_needed = 1;
1979 1986
1980 if (osb->cconn) 1987 if (osb->cconn)
@@ -2353,7 +2360,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
2353 mlog_errno(status); 2360 mlog_errno(status);
2354 goto bail; 2361 goto bail;
2355 } 2362 }
2356 cleancache_init_shared_fs((char *)&uuid_net_key, sb); 2363 cleancache_init_shared_fs((char *)&di->id2.i_super.s_uuid, sb);
2357 2364
2358bail: 2365bail:
2359 return status; 2366 return status;
@@ -2462,8 +2469,8 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
2462 goto finally; 2469 goto finally;
2463 } 2470 }
2464 } else { 2471 } else {
2465 mlog(ML_NOTICE, "File system was not unmounted cleanly, " 2472 printk(KERN_NOTICE "ocfs2: File system on device (%s) was not "
2466 "recovering volume.\n"); 2473 "unmounted cleanly, recovering it.\n", osb->dev_str);
2467 } 2474 }
2468 2475
2469 local = ocfs2_mount_local(osb); 2476 local = ocfs2_mount_local(osb);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 194fb22ef79d..aa9e8777b09a 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -2376,16 +2376,18 @@ static int ocfs2_remove_value_outside(struct inode*inode,
2376 } 2376 }
2377 2377
2378 ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt); 2378 ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
2379 if (ret < 0) {
2380 mlog_errno(ret);
2381 break;
2382 }
2383 2379
2384 ocfs2_commit_trans(osb, ctxt.handle); 2380 ocfs2_commit_trans(osb, ctxt.handle);
2385 if (ctxt.meta_ac) { 2381 if (ctxt.meta_ac) {
2386 ocfs2_free_alloc_context(ctxt.meta_ac); 2382 ocfs2_free_alloc_context(ctxt.meta_ac);
2387 ctxt.meta_ac = NULL; 2383 ctxt.meta_ac = NULL;
2388 } 2384 }
2385
2386 if (ret < 0) {
2387 mlog_errno(ret);
2388 break;
2389 }
2390
2389 } 2391 }
2390 2392
2391 if (ctxt.meta_ac) 2393 if (ctxt.meta_ac)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 2db1bd3173b2..851ba3dcdc29 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1652,46 +1652,12 @@ out:
1652 return error; 1652 return error;
1653} 1653}
1654 1654
1655static int proc_pid_fd_link_getattr(struct vfsmount *mnt, struct dentry *dentry,
1656 struct kstat *stat)
1657{
1658 struct inode *inode = dentry->d_inode;
1659 struct task_struct *task = get_proc_task(inode);
1660 int rc;
1661
1662 if (task == NULL)
1663 return -ESRCH;
1664
1665 rc = -EACCES;
1666 if (lock_trace(task))
1667 goto out_task;
1668
1669 generic_fillattr(inode, stat);
1670 unlock_trace(task);
1671 rc = 0;
1672out_task:
1673 put_task_struct(task);
1674 return rc;
1675}
1676
1677static const struct inode_operations proc_pid_link_inode_operations = { 1655static const struct inode_operations proc_pid_link_inode_operations = {
1678 .readlink = proc_pid_readlink, 1656 .readlink = proc_pid_readlink,
1679 .follow_link = proc_pid_follow_link, 1657 .follow_link = proc_pid_follow_link,
1680 .setattr = proc_setattr, 1658 .setattr = proc_setattr,
1681}; 1659};
1682 1660
1683static const struct inode_operations proc_fdinfo_link_inode_operations = {
1684 .setattr = proc_setattr,
1685 .getattr = proc_pid_fd_link_getattr,
1686};
1687
1688static const struct inode_operations proc_fd_link_inode_operations = {
1689 .readlink = proc_pid_readlink,
1690 .follow_link = proc_pid_follow_link,
1691 .setattr = proc_setattr,
1692 .getattr = proc_pid_fd_link_getattr,
1693};
1694
1695 1661
1696/* building an inode */ 1662/* building an inode */
1697 1663
@@ -1923,61 +1889,49 @@ out:
1923 1889
1924static int proc_fd_info(struct inode *inode, struct path *path, char *info) 1890static int proc_fd_info(struct inode *inode, struct path *path, char *info)
1925{ 1891{
1926 struct task_struct *task; 1892 struct task_struct *task = get_proc_task(inode);
1927 struct files_struct *files; 1893 struct files_struct *files = NULL;
1928 struct file *file; 1894 struct file *file;
1929 int fd = proc_fd(inode); 1895 int fd = proc_fd(inode);
1930 int rc;
1931
1932 task = get_proc_task(inode);
1933 if (!task)
1934 return -ENOENT;
1935
1936 rc = -EACCES;
1937 if (lock_trace(task))
1938 goto out_task;
1939
1940 rc = -ENOENT;
1941 files = get_files_struct(task);
1942 if (files == NULL)
1943 goto out_unlock;
1944 1896
1945 /* 1897 if (task) {
1946 * We are not taking a ref to the file structure, so we must 1898 files = get_files_struct(task);
1947 * hold ->file_lock. 1899 put_task_struct(task);
1948 */ 1900 }
1949 spin_lock(&files->file_lock); 1901 if (files) {
1950 file = fcheck_files(files, fd); 1902 /*
1951 if (file) { 1903 * We are not taking a ref to the file structure, so we must
1952 unsigned int f_flags; 1904 * hold ->file_lock.
1953 struct fdtable *fdt; 1905 */
1954 1906 spin_lock(&files->file_lock);
1955 fdt = files_fdtable(files); 1907 file = fcheck_files(files, fd);
1956 f_flags = file->f_flags & ~O_CLOEXEC; 1908 if (file) {
1957 if (FD_ISSET(fd, fdt->close_on_exec)) 1909 unsigned int f_flags;
1958 f_flags |= O_CLOEXEC; 1910 struct fdtable *fdt;
1959 1911
1960 if (path) { 1912 fdt = files_fdtable(files);
1961 *path = file->f_path; 1913 f_flags = file->f_flags & ~O_CLOEXEC;
1962 path_get(&file->f_path); 1914 if (FD_ISSET(fd, fdt->close_on_exec))
1915 f_flags |= O_CLOEXEC;
1916
1917 if (path) {
1918 *path = file->f_path;
1919 path_get(&file->f_path);
1920 }
1921 if (info)
1922 snprintf(info, PROC_FDINFO_MAX,
1923 "pos:\t%lli\n"
1924 "flags:\t0%o\n",
1925 (long long) file->f_pos,
1926 f_flags);
1927 spin_unlock(&files->file_lock);
1928 put_files_struct(files);
1929 return 0;
1963 } 1930 }
1964 if (info) 1931 spin_unlock(&files->file_lock);
1965 snprintf(info, PROC_FDINFO_MAX, 1932 put_files_struct(files);
1966 "pos:\t%lli\n" 1933 }
1967 "flags:\t0%o\n", 1934 return -ENOENT;
1968 (long long) file->f_pos,
1969 f_flags);
1970 rc = 0;
1971 } else
1972 rc = -ENOENT;
1973 spin_unlock(&files->file_lock);
1974 put_files_struct(files);
1975
1976out_unlock:
1977 unlock_trace(task);
1978out_task:
1979 put_task_struct(task);
1980 return rc;
1981} 1935}
1982 1936
1983static int proc_fd_link(struct inode *inode, struct path *path) 1937static int proc_fd_link(struct inode *inode, struct path *path)
@@ -2072,7 +2026,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
2072 spin_unlock(&files->file_lock); 2026 spin_unlock(&files->file_lock);
2073 put_files_struct(files); 2027 put_files_struct(files);
2074 2028
2075 inode->i_op = &proc_fd_link_inode_operations; 2029 inode->i_op = &proc_pid_link_inode_operations;
2076 inode->i_size = 64; 2030 inode->i_size = 64;
2077 ei->op.proc_get_link = proc_fd_link; 2031 ei->op.proc_get_link = proc_fd_link;
2078 d_set_d_op(dentry, &tid_fd_dentry_operations); 2032 d_set_d_op(dentry, &tid_fd_dentry_operations);
@@ -2104,12 +2058,7 @@ static struct dentry *proc_lookupfd_common(struct inode *dir,
2104 if (fd == ~0U) 2058 if (fd == ~0U)
2105 goto out; 2059 goto out;
2106 2060
2107 result = ERR_PTR(-EACCES);
2108 if (lock_trace(task))
2109 goto out;
2110
2111 result = instantiate(dir, dentry, task, &fd); 2061 result = instantiate(dir, dentry, task, &fd);
2112 unlock_trace(task);
2113out: 2062out:
2114 put_task_struct(task); 2063 put_task_struct(task);
2115out_no_task: 2064out_no_task:
@@ -2129,28 +2078,23 @@ static int proc_readfd_common(struct file * filp, void * dirent,
2129 retval = -ENOENT; 2078 retval = -ENOENT;
2130 if (!p) 2079 if (!p)
2131 goto out_no_task; 2080 goto out_no_task;
2132
2133 retval = -EACCES;
2134 if (lock_trace(p))
2135 goto out;
2136
2137 retval = 0; 2081 retval = 0;
2138 2082
2139 fd = filp->f_pos; 2083 fd = filp->f_pos;
2140 switch (fd) { 2084 switch (fd) {
2141 case 0: 2085 case 0:
2142 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) 2086 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
2143 goto out_unlock; 2087 goto out;
2144 filp->f_pos++; 2088 filp->f_pos++;
2145 case 1: 2089 case 1:
2146 ino = parent_ino(dentry); 2090 ino = parent_ino(dentry);
2147 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) 2091 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
2148 goto out_unlock; 2092 goto out;
2149 filp->f_pos++; 2093 filp->f_pos++;
2150 default: 2094 default:
2151 files = get_files_struct(p); 2095 files = get_files_struct(p);
2152 if (!files) 2096 if (!files)
2153 goto out_unlock; 2097 goto out;
2154 rcu_read_lock(); 2098 rcu_read_lock();
2155 for (fd = filp->f_pos-2; 2099 for (fd = filp->f_pos-2;
2156 fd < files_fdtable(files)->max_fds; 2100 fd < files_fdtable(files)->max_fds;
@@ -2174,9 +2118,6 @@ static int proc_readfd_common(struct file * filp, void * dirent,
2174 rcu_read_unlock(); 2118 rcu_read_unlock();
2175 put_files_struct(files); 2119 put_files_struct(files);
2176 } 2120 }
2177
2178out_unlock:
2179 unlock_trace(p);
2180out: 2121out:
2181 put_task_struct(p); 2122 put_task_struct(p);
2182out_no_task: 2123out_no_task:
@@ -2254,7 +2195,6 @@ static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
2254 ei->fd = fd; 2195 ei->fd = fd;
2255 inode->i_mode = S_IFREG | S_IRUSR; 2196 inode->i_mode = S_IFREG | S_IRUSR;
2256 inode->i_fop = &proc_fdinfo_file_operations; 2197 inode->i_fop = &proc_fdinfo_file_operations;
2257 inode->i_op = &proc_fdinfo_link_inode_operations;
2258 d_set_d_op(dentry, &tid_fd_dentry_operations); 2198 d_set_d_op(dentry, &tid_fd_dentry_operations);
2259 d_add(dentry, inode); 2199 d_add(dentry, inode);
2260 /* Close the race of the process dying before we return the dentry */ 2200 /* Close the race of the process dying before we return the dentry */
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 586174168e2a..80e4645f7990 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -131,12 +131,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
131 K(i.freeswap), 131 K(i.freeswap),
132 K(global_page_state(NR_FILE_DIRTY)), 132 K(global_page_state(NR_FILE_DIRTY)),
133 K(global_page_state(NR_WRITEBACK)), 133 K(global_page_state(NR_WRITEBACK)),
134 K(global_page_state(NR_ANON_PAGES)
135#ifdef CONFIG_TRANSPARENT_HUGEPAGE 134#ifdef CONFIG_TRANSPARENT_HUGEPAGE
135 K(global_page_state(NR_ANON_PAGES)
136 + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * 136 + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
137 HPAGE_PMD_NR 137 HPAGE_PMD_NR),
138#else
139 K(global_page_state(NR_ANON_PAGES)),
138#endif 140#endif
139 ),
140 K(global_page_state(NR_FILE_MAPPED)), 141 K(global_page_state(NR_FILE_MAPPED)),
141 K(global_page_state(NR_SHMEM)), 142 K(global_page_state(NR_SHMEM)),
142 K(global_page_state(NR_SLAB_RECLAIMABLE) + 143 K(global_page_state(NR_SLAB_RECLAIMABLE) +
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 9a8a2b77b874..03102d978180 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -91,20 +91,18 @@ static struct file_system_type proc_fs_type = {
91 91
92void __init proc_root_init(void) 92void __init proc_root_init(void)
93{ 93{
94 struct vfsmount *mnt;
95 int err; 94 int err;
96 95
97 proc_init_inodecache(); 96 proc_init_inodecache();
98 err = register_filesystem(&proc_fs_type); 97 err = register_filesystem(&proc_fs_type);
99 if (err) 98 if (err)
100 return; 99 return;
101 mnt = kern_mount_data(&proc_fs_type, &init_pid_ns); 100 err = pid_ns_prepare_proc(&init_pid_ns);
102 if (IS_ERR(mnt)) { 101 if (err) {
103 unregister_filesystem(&proc_fs_type); 102 unregister_filesystem(&proc_fs_type);
104 return; 103 return;
105 } 104 }
106 105
107 init_pid_ns.proc_mnt = mnt;
108 proc_symlink("mounts", NULL, "self/mounts"); 106 proc_symlink("mounts", NULL, "self/mounts");
109 107
110 proc_net_init(); 108 proc_net_init();
@@ -209,5 +207,5 @@ int pid_ns_prepare_proc(struct pid_namespace *ns)
209 207
210void pid_ns_release_proc(struct pid_namespace *ns) 208void pid_ns_release_proc(struct pid_namespace *ns)
211{ 209{
212 mntput(ns->proc_mnt); 210 kern_unmount(ns->proc_mnt);
213} 211}
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 42b274da92c3..2a30d67dd6b8 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu)
32 idle = kstat_cpu(cpu).cpustat.idle; 32 idle = kstat_cpu(cpu).cpustat.idle;
33 idle = cputime64_add(idle, arch_idle_time(cpu)); 33 idle = cputime64_add(idle, arch_idle_time(cpu));
34 } else 34 } else
35 idle = usecs_to_cputime(idle_time); 35 idle = nsecs_to_jiffies64(1000 * idle_time);
36 36
37 return idle; 37 return idle;
38} 38}
@@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu)
46 /* !NO_HZ so we can rely on cpustat.iowait */ 46 /* !NO_HZ so we can rely on cpustat.iowait */
47 iowait = kstat_cpu(cpu).cpustat.iowait; 47 iowait = kstat_cpu(cpu).cpustat.iowait;
48 else 48 else
49 iowait = usecs_to_cputime(iowait_time); 49 iowait = nsecs_to_jiffies64(1000 * iowait_time);
50 50
51 return iowait; 51 return iowait;
52} 52}
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 2bd620f0d796..57bbf9078ac8 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -167,6 +167,7 @@ int pstore_register(struct pstore_info *psi)
167 } 167 }
168 168
169 psinfo = psi; 169 psinfo = psi;
170 mutex_init(&psinfo->read_mutex);
170 spin_unlock(&pstore_lock); 171 spin_unlock(&pstore_lock);
171 172
172 if (owner && !try_module_get(owner)) { 173 if (owner && !try_module_get(owner)) {
@@ -195,30 +196,32 @@ EXPORT_SYMBOL_GPL(pstore_register);
195void pstore_get_records(int quiet) 196void pstore_get_records(int quiet)
196{ 197{
197 struct pstore_info *psi = psinfo; 198 struct pstore_info *psi = psinfo;
199 char *buf = NULL;
198 ssize_t size; 200 ssize_t size;
199 u64 id; 201 u64 id;
200 enum pstore_type_id type; 202 enum pstore_type_id type;
201 struct timespec time; 203 struct timespec time;
202 int failed = 0, rc; 204 int failed = 0, rc;
203 unsigned long flags;
204 205
205 if (!psi) 206 if (!psi)
206 return; 207 return;
207 208
208 spin_lock_irqsave(&psinfo->buf_lock, flags); 209 mutex_lock(&psi->read_mutex);
209 rc = psi->open(psi); 210 rc = psi->open(psi);
210 if (rc) 211 if (rc)
211 goto out; 212 goto out;
212 213
213 while ((size = psi->read(&id, &type, &time, psi)) > 0) { 214 while ((size = psi->read(&id, &type, &time, &buf, psi)) > 0) {
214 rc = pstore_mkfile(type, psi->name, id, psi->buf, (size_t)size, 215 rc = pstore_mkfile(type, psi->name, id, buf, (size_t)size,
215 time, psi); 216 time, psi);
217 kfree(buf);
218 buf = NULL;
216 if (rc && (rc != -EEXIST || !quiet)) 219 if (rc && (rc != -EEXIST || !quiet))
217 failed++; 220 failed++;
218 } 221 }
219 psi->close(psi); 222 psi->close(psi);
220out: 223out:
221 spin_unlock_irqrestore(&psinfo->buf_lock, flags); 224 mutex_unlock(&psi->read_mutex);
222 225
223 if (failed) 226 if (failed)
224 printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n", 227 printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n",
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 05d6b0e78c95..dba43c3ea3af 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -449,8 +449,6 @@ EXPORT_SYMBOL(seq_path);
449 449
450/* 450/*
451 * Same as seq_path, but relative to supplied root. 451 * Same as seq_path, but relative to supplied root.
452 *
453 * root may be changed, see __d_path().
454 */ 452 */
455int seq_path_root(struct seq_file *m, struct path *path, struct path *root, 453int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
456 char *esc) 454 char *esc)
@@ -463,6 +461,8 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
463 char *p; 461 char *p;
464 462
465 p = __d_path(path, root, buf, size); 463 p = __d_path(path, root, buf, size);
464 if (!p)
465 return SEQ_SKIP;
466 res = PTR_ERR(p); 466 res = PTR_ERR(p);
467 if (!IS_ERR(p)) { 467 if (!IS_ERR(p)) {
468 char *end = mangle_path(buf, p, esc); 468 char *end = mangle_path(buf, p, esc);
@@ -474,7 +474,7 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
474 } 474 }
475 seq_commit(m, res); 475 seq_commit(m, res);
476 476
477 return res < 0 ? res : 0; 477 return res < 0 && res != -ENAMETOOLONG ? res : 0;
478} 478}
479 479
480/* 480/*
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 20403dc5d437..ae0e76bb6ebf 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2264,19 +2264,12 @@ static int __init ubifs_init(void)
2264 return -EINVAL; 2264 return -EINVAL;
2265 } 2265 }
2266 2266
2267 err = register_filesystem(&ubifs_fs_type);
2268 if (err) {
2269 ubifs_err("cannot register file system, error %d", err);
2270 return err;
2271 }
2272
2273 err = -ENOMEM;
2274 ubifs_inode_slab = kmem_cache_create("ubifs_inode_slab", 2267 ubifs_inode_slab = kmem_cache_create("ubifs_inode_slab",
2275 sizeof(struct ubifs_inode), 0, 2268 sizeof(struct ubifs_inode), 0,
2276 SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT, 2269 SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT,
2277 &inode_slab_ctor); 2270 &inode_slab_ctor);
2278 if (!ubifs_inode_slab) 2271 if (!ubifs_inode_slab)
2279 goto out_reg; 2272 return -ENOMEM;
2280 2273
2281 register_shrinker(&ubifs_shrinker_info); 2274 register_shrinker(&ubifs_shrinker_info);
2282 2275
@@ -2288,15 +2281,20 @@ static int __init ubifs_init(void)
2288 if (err) 2281 if (err)
2289 goto out_compr; 2282 goto out_compr;
2290 2283
2284 err = register_filesystem(&ubifs_fs_type);
2285 if (err) {
2286 ubifs_err("cannot register file system, error %d", err);
2287 goto out_dbg;
2288 }
2291 return 0; 2289 return 0;
2292 2290
2291out_dbg:
2292 dbg_debugfs_exit();
2293out_compr: 2293out_compr:
2294 ubifs_compressors_exit(); 2294 ubifs_compressors_exit();
2295out_shrinker: 2295out_shrinker:
2296 unregister_shrinker(&ubifs_shrinker_info); 2296 unregister_shrinker(&ubifs_shrinker_info);
2297 kmem_cache_destroy(ubifs_inode_slab); 2297 kmem_cache_destroy(ubifs_inode_slab);
2298out_reg:
2299 unregister_filesystem(&ubifs_fs_type);
2300 return err; 2298 return err;
2301} 2299}
2302/* late_initcall to let compressors initialize first */ 2300/* late_initcall to let compressors initialize first */
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index b6c4b3795c4a..76e4266d2e7e 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -42,6 +42,8 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
42 int count, i; 42 int count, i;
43 43
44 count = be32_to_cpu(aclp->acl_cnt); 44 count = be32_to_cpu(aclp->acl_cnt);
45 if (count > XFS_ACL_MAX_ENTRIES)
46 return ERR_PTR(-EFSCORRUPTED);
45 47
46 acl = posix_acl_alloc(count, GFP_KERNEL); 48 acl = posix_acl_alloc(count, GFP_KERNEL);
47 if (!acl) 49 if (!acl)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 33b13310ee0c..574d4ee9b625 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -189,7 +189,7 @@ xfs_end_io(
189 int error = 0; 189 int error = 0;
190 190
191 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { 191 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
192 error = -EIO; 192 ioend->io_error = -EIO;
193 goto done; 193 goto done;
194 } 194 }
195 if (ioend->io_error) 195 if (ioend->io_error)
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index d4906e7c9787..c1b55e596551 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -110,6 +110,7 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
110/* 110/*
111 * Query whether the requested number of additional bytes of extended 111 * Query whether the requested number of additional bytes of extended
112 * attribute space will be able to fit inline. 112 * attribute space will be able to fit inline.
113 *
113 * Returns zero if not, else the di_forkoff fork offset to be used in the 114 * Returns zero if not, else the di_forkoff fork offset to be used in the
114 * literal area for attribute data once the new bytes have been added. 115 * literal area for attribute data once the new bytes have been added.
115 * 116 *
@@ -122,7 +123,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
122 int offset; 123 int offset;
123 int minforkoff; /* lower limit on valid forkoff locations */ 124 int minforkoff; /* lower limit on valid forkoff locations */
124 int maxforkoff; /* upper limit on valid forkoff locations */ 125 int maxforkoff; /* upper limit on valid forkoff locations */
125 int dsize; 126 int dsize;
126 xfs_mount_t *mp = dp->i_mount; 127 xfs_mount_t *mp = dp->i_mount;
127 128
128 offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */ 129 offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */
@@ -136,47 +137,60 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
136 return (offset >= minforkoff) ? minforkoff : 0; 137 return (offset >= minforkoff) ? minforkoff : 0;
137 } 138 }
138 139
139 if (!(mp->m_flags & XFS_MOUNT_ATTR2)) { 140 /*
140 if (bytes <= XFS_IFORK_ASIZE(dp)) 141 * If the requested numbers of bytes is smaller or equal to the
141 return dp->i_d.di_forkoff; 142 * current attribute fork size we can always proceed.
143 *
144 * Note that if_bytes in the data fork might actually be larger than
145 * the current data fork size is due to delalloc extents. In that
146 * case either the extent count will go down when they are converted
147 * to real extents, or the delalloc conversion will take care of the
148 * literal area rebalancing.
149 */
150 if (bytes <= XFS_IFORK_ASIZE(dp))
151 return dp->i_d.di_forkoff;
152
153 /*
154 * For attr2 we can try to move the forkoff if there is space in the
155 * literal area, but for the old format we are done if there is no
156 * space in the fixed attribute fork.
157 */
158 if (!(mp->m_flags & XFS_MOUNT_ATTR2))
142 return 0; 159 return 0;
143 }
144 160
145 dsize = dp->i_df.if_bytes; 161 dsize = dp->i_df.if_bytes;
146 162
147 switch (dp->i_d.di_format) { 163 switch (dp->i_d.di_format) {
148 case XFS_DINODE_FMT_EXTENTS: 164 case XFS_DINODE_FMT_EXTENTS:
149 /* 165 /*
150 * If there is no attr fork and the data fork is extents, 166 * If there is no attr fork and the data fork is extents,
151 * determine if creating the default attr fork will result 167 * determine if creating the default attr fork will result
152 * in the extents form migrating to btree. If so, the 168 * in the extents form migrating to btree. If so, the
153 * minimum offset only needs to be the space required for 169 * minimum offset only needs to be the space required for
154 * the btree root. 170 * the btree root.
155 */ 171 */
156 if (!dp->i_d.di_forkoff && dp->i_df.if_bytes > 172 if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
157 xfs_default_attroffset(dp)) 173 xfs_default_attroffset(dp))
158 dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS); 174 dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
159 break; 175 break;
160
161 case XFS_DINODE_FMT_BTREE: 176 case XFS_DINODE_FMT_BTREE:
162 /* 177 /*
163 * If have data btree then keep forkoff if we have one, 178 * If we have a data btree then keep forkoff if we have one,
164 * otherwise we are adding a new attr, so then we set 179 * otherwise we are adding a new attr, so then we set
165 * minforkoff to where the btree root can finish so we have 180 * minforkoff to where the btree root can finish so we have
166 * plenty of room for attrs 181 * plenty of room for attrs
167 */ 182 */
168 if (dp->i_d.di_forkoff) { 183 if (dp->i_d.di_forkoff) {
169 if (offset < dp->i_d.di_forkoff) 184 if (offset < dp->i_d.di_forkoff)
170 return 0; 185 return 0;
171 else 186 return dp->i_d.di_forkoff;
172 return dp->i_d.di_forkoff; 187 }
173 } else 188 dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
174 dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
175 break; 189 break;
176 } 190 }
177 191
178 /* 192 /*
179 * A data fork btree root must have space for at least 193 * A data fork btree root must have space for at least
180 * MINDBTPTRS key/ptr pairs if the data fork is small or empty. 194 * MINDBTPTRS key/ptr pairs if the data fork is small or empty.
181 */ 195 */
182 minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS)); 196 minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
@@ -186,10 +200,10 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
186 maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS); 200 maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
187 maxforkoff = maxforkoff >> 3; /* rounded down */ 201 maxforkoff = maxforkoff >> 3; /* rounded down */
188 202
189 if (offset >= minforkoff && offset < maxforkoff)
190 return offset;
191 if (offset >= maxforkoff) 203 if (offset >= maxforkoff)
192 return maxforkoff; 204 return maxforkoff;
205 if (offset >= minforkoff)
206 return offset;
193 return 0; 207 return 0;
194} 208}
195 209
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index c68baeb0974a..d0ab78837057 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2383,6 +2383,8 @@ xfs_bmap_btalloc(
2383 int tryagain; 2383 int tryagain;
2384 int error; 2384 int error;
2385 2385
2386 ASSERT(ap->length);
2387
2386 mp = ap->ip->i_mount; 2388 mp = ap->ip->i_mount;
2387 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; 2389 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
2388 if (unlikely(align)) { 2390 if (unlikely(align)) {
@@ -4629,6 +4631,8 @@ xfs_bmapi_allocate(
4629 int error; 4631 int error;
4630 int rt; 4632 int rt;
4631 4633
4634 ASSERT(bma->length > 0);
4635
4632 rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip); 4636 rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
4633 4637
4634 /* 4638 /*
@@ -4849,6 +4853,7 @@ xfs_bmapi_write(
4849 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); 4853 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4850 ASSERT(!(flags & XFS_BMAPI_IGSTATE)); 4854 ASSERT(!(flags & XFS_BMAPI_IGSTATE));
4851 ASSERT(tp != NULL); 4855 ASSERT(tp != NULL);
4856 ASSERT(len > 0);
4852 4857
4853 whichfork = (flags & XFS_BMAPI_ATTRFORK) ? 4858 whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4854 XFS_ATTR_FORK : XFS_DATA_FORK; 4859 XFS_ATTR_FORK : XFS_DATA_FORK;
@@ -4918,9 +4923,22 @@ xfs_bmapi_write(
4918 bma.eof = eof; 4923 bma.eof = eof;
4919 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4924 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4920 bma.wasdel = wasdelay; 4925 bma.wasdel = wasdelay;
4921 bma.length = len;
4922 bma.offset = bno; 4926 bma.offset = bno;
4923 4927
4928 /*
4929 * There's a 32/64 bit type mismatch between the
4930 * allocation length request (which can be 64 bits in
4931 * length) and the bma length request, which is
4932 * xfs_extlen_t and therefore 32 bits. Hence we have to
4933 * check for 32-bit overflows and handle them here.
4934 */
4935 if (len > (xfs_filblks_t)MAXEXTLEN)
4936 bma.length = MAXEXTLEN;
4937 else
4938 bma.length = len;
4939
4940 ASSERT(len > 0);
4941 ASSERT(bma.length > 0);
4924 error = xfs_bmapi_allocate(&bma, flags); 4942 error = xfs_bmapi_allocate(&bma, flags);
4925 if (error) 4943 if (error)
4926 goto error0; 4944 goto error0;
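
The MAXEXTLEN clamp above guards the narrowing from the 64-bit xfs_filblks_t request to the 32-bit xfs_extlen_t held in bma.length. A generic sketch of that clamp-and-continue pattern; the cap value and the process_chunk() helper are illustrative stand-ins, not the XFS definitions:

#include <stdint.h>
#include <stdio.h>

#define MAX_CHUNK ((uint32_t)((1U << 21) - 1))  /* stand-in for a MAXEXTLEN-style cap */

/* placeholder for the per-extent work done on each clamped piece */
static void process_chunk(uint64_t start, uint32_t len)
{
        printf("chunk at %llu, len %u\n", (unsigned long long)start, len);
}

int main(void)
{
        uint64_t start = 0;
        uint64_t remaining = 5ULL * MAX_CHUNK + 123;    /* longer than one 32-bit chunk */

        while (remaining > 0) {
                /* never let the 32-bit per-iteration length overflow */
                uint32_t len = remaining > MAX_CHUNK ? MAX_CHUNK : (uint32_t)remaining;

                process_chunk(start, len);
                start += len;
                remaining -= len;
        }
        return 0;
}

In the hunk above the clamp sits where bma.length is assigned inside the mapping loop, so each pass maps at most MAXEXTLEN blocks and later passes pick up the remainder.
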
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 1a3513881bce..eac97ef81e2a 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -656,7 +656,7 @@ xfs_buf_item_committing(
656/* 656/*
657 * This is the ops vector shared by all buf log items. 657 * This is the ops vector shared by all buf log items.
658 */ 658 */
659static struct xfs_item_ops xfs_buf_item_ops = { 659static const struct xfs_item_ops xfs_buf_item_ops = {
660 .iop_size = xfs_buf_item_size, 660 .iop_size = xfs_buf_item_size,
661 .iop_format = xfs_buf_item_format, 661 .iop_format = xfs_buf_item_format,
662 .iop_pin = xfs_buf_item_pin, 662 .iop_pin = xfs_buf_item_pin,
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index bb3f71d236d2..0dee0b71029d 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -295,7 +295,7 @@ xfs_qm_dquot_logitem_committing(
295/* 295/*
296 * This is the ops vector for dquots 296 * This is the ops vector for dquots
297 */ 297 */
298static struct xfs_item_ops xfs_dquot_item_ops = { 298static const struct xfs_item_ops xfs_dquot_item_ops = {
299 .iop_size = xfs_qm_dquot_logitem_size, 299 .iop_size = xfs_qm_dquot_logitem_size,
300 .iop_format = xfs_qm_dquot_logitem_format, 300 .iop_format = xfs_qm_dquot_logitem_format,
301 .iop_pin = xfs_qm_dquot_logitem_pin, 301 .iop_pin = xfs_qm_dquot_logitem_pin,
@@ -483,7 +483,7 @@ xfs_qm_qoff_logitem_committing(
483{ 483{
484} 484}
485 485
486static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = { 486static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
487 .iop_size = xfs_qm_qoff_logitem_size, 487 .iop_size = xfs_qm_qoff_logitem_size,
488 .iop_format = xfs_qm_qoff_logitem_format, 488 .iop_format = xfs_qm_qoff_logitem_format,
489 .iop_pin = xfs_qm_qoff_logitem_pin, 489 .iop_pin = xfs_qm_qoff_logitem_pin,
@@ -498,7 +498,7 @@ static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
498/* 498/*
499 * This is the ops vector shared by all quotaoff-start log items. 499 * This is the ops vector shared by all quotaoff-start log items.
500 */ 500 */
501static struct xfs_item_ops xfs_qm_qoff_logitem_ops = { 501static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
502 .iop_size = xfs_qm_qoff_logitem_size, 502 .iop_size = xfs_qm_qoff_logitem_size,
503 .iop_format = xfs_qm_qoff_logitem_format, 503 .iop_format = xfs_qm_qoff_logitem_format,
504 .iop_pin = xfs_qm_qoff_logitem_pin, 504 .iop_pin = xfs_qm_qoff_logitem_pin,
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index da108977b21f..558910f5e3c0 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -98,22 +98,22 @@ xfs_fs_encode_fh(
98 switch (fileid_type) { 98 switch (fileid_type) {
99 case FILEID_INO32_GEN_PARENT: 99 case FILEID_INO32_GEN_PARENT:
100 spin_lock(&dentry->d_lock); 100 spin_lock(&dentry->d_lock);
101 fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino; 101 fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
102 fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation; 102 fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
103 spin_unlock(&dentry->d_lock); 103 spin_unlock(&dentry->d_lock);
104 /*FALLTHRU*/ 104 /*FALLTHRU*/
105 case FILEID_INO32_GEN: 105 case FILEID_INO32_GEN:
106 fid->i32.ino = inode->i_ino; 106 fid->i32.ino = XFS_I(inode)->i_ino;
107 fid->i32.gen = inode->i_generation; 107 fid->i32.gen = inode->i_generation;
108 break; 108 break;
109 case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG: 109 case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
110 spin_lock(&dentry->d_lock); 110 spin_lock(&dentry->d_lock);
111 fid64->parent_ino = dentry->d_parent->d_inode->i_ino; 111 fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
112 fid64->parent_gen = dentry->d_parent->d_inode->i_generation; 112 fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
113 spin_unlock(&dentry->d_lock); 113 spin_unlock(&dentry->d_lock);
114 /*FALLTHRU*/ 114 /*FALLTHRU*/
115 case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG: 115 case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
116 fid64->ino = inode->i_ino; 116 fid64->ino = XFS_I(inode)->i_ino;
117 fid64->gen = inode->i_generation; 117 fid64->gen = inode->i_generation;
118 break; 118 break;
119 } 119 }
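
The xfs_export.c change reads the inode number from the XFS-private inode (via XFS_I()) rather than from the VFS inode, whose i_ino is an unsigned long and therefore narrower on 32-bit hosts. XFS_I() is a container_of-style conversion from the embedded VFS inode back to its enclosing structure; a self-contained sketch of that pattern with made-up structure names:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Poor man's container_of: recover the enclosing structure from a
     * pointer to one of its members. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Simplified stand-ins: a generic "vfs inode" with a possibly
     * narrower inode number, embedded in a filesystem-private inode
     * that keeps the full 64-bit value. */
    struct vfs_inode {
        unsigned long i_ino;          /* 32 bits on a 32-bit host */
    };

    struct fs_inode {
        uint64_t ino;                 /* authoritative 64-bit number */
        struct vfs_inode vfs;         /* embedded VFS part */
    };

    static struct fs_inode *FS_I(struct vfs_inode *inode)
    {
        return container_of(inode, struct fs_inode, vfs);
    }

    int main(void)
    {
        struct fs_inode fi = { .ino = 0x123456789abcULL };

        fi.vfs.i_ino = (unsigned long)fi.ino;   /* may truncate */

        printf("vfs:  %lu\n", fi.vfs.i_ino);
        printf("full: %llu\n", (unsigned long long)FS_I(&fi.vfs)->ino);
        return 0;
    }
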
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index d22e62623437..35c2aff38b20 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -217,7 +217,7 @@ xfs_efi_item_committing(
217/* 217/*
218 * This is the ops vector shared by all efi log items. 218 * This is the ops vector shared by all efi log items.
219 */ 219 */
220static struct xfs_item_ops xfs_efi_item_ops = { 220static const struct xfs_item_ops xfs_efi_item_ops = {
221 .iop_size = xfs_efi_item_size, 221 .iop_size = xfs_efi_item_size,
222 .iop_format = xfs_efi_item_format, 222 .iop_format = xfs_efi_item_format,
223 .iop_pin = xfs_efi_item_pin, 223 .iop_pin = xfs_efi_item_pin,
@@ -477,7 +477,7 @@ xfs_efd_item_committing(
477/* 477/*
478 * This is the ops vector shared by all efd log items. 478 * This is the ops vector shared by all efd log items.
479 */ 479 */
480static struct xfs_item_ops xfs_efd_item_ops = { 480static const struct xfs_item_ops xfs_efd_item_ops = {
481 .iop_size = xfs_efd_item_size, 481 .iop_size = xfs_efd_item_size,
482 .iop_format = xfs_efd_item_format, 482 .iop_format = xfs_efd_item_format,
483 .iop_pin = xfs_efd_item_pin, 483 .iop_pin = xfs_efd_item_pin,
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c0237c602f11..755ee8164880 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2835,6 +2835,27 @@ corrupt_out:
2835 return XFS_ERROR(EFSCORRUPTED); 2835 return XFS_ERROR(EFSCORRUPTED);
2836} 2836}
2837 2837
2838void
2839xfs_promote_inode(
2840 struct xfs_inode *ip)
2841{
2842 struct xfs_buf *bp;
2843
2844 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2845
2846 bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno,
2847 ip->i_imap.im_len, XBF_TRYLOCK);
2848 if (!bp)
2849 return;
2850
2851 if (XFS_BUF_ISDELAYWRITE(bp)) {
2852 xfs_buf_delwri_promote(bp);
2853 wake_up_process(ip->i_mount->m_ddev_targp->bt_task);
2854 }
2855
2856 xfs_buf_relse(bp);
2857}
2858
2838/* 2859/*
2839 * Return a pointer to the extent record at file index idx. 2860 * Return a pointer to the extent record at file index idx.
2840 */ 2861 */
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 760140d1dd66..b4cd4739f98e 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -498,6 +498,7 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
498void xfs_iext_realloc(xfs_inode_t *, int, int); 498void xfs_iext_realloc(xfs_inode_t *, int, int);
499void xfs_iunpin_wait(xfs_inode_t *); 499void xfs_iunpin_wait(xfs_inode_t *);
500int xfs_iflush(xfs_inode_t *, uint); 500int xfs_iflush(xfs_inode_t *, uint);
501void xfs_promote_inode(struct xfs_inode *);
501void xfs_lock_inodes(xfs_inode_t **, int, uint); 502void xfs_lock_inodes(xfs_inode_t **, int, uint);
502void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); 503void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
503 504
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index b7cf21ba240f..abaafdbb3e65 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -795,7 +795,7 @@ xfs_inode_item_committing(
795/* 795/*
796 * This is the ops vector shared by all buf log items. 796 * This is the ops vector shared by all buf log items.
797 */ 797 */
798static struct xfs_item_ops xfs_inode_item_ops = { 798static const struct xfs_item_ops xfs_inode_item_ops = {
799 .iop_size = xfs_inode_item_size, 799 .iop_size = xfs_inode_item_size,
800 .iop_format = xfs_inode_item_format, 800 .iop_format = xfs_inode_item_format,
801 .iop_pin = xfs_inode_item_pin, 801 .iop_pin = xfs_inode_item_pin,
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 2758a6277c52..34817adf4b9e 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -150,6 +150,117 @@ xlog_grant_add_space(
150 } while (head_val != old); 150 } while (head_val != old);
151} 151}
152 152
153STATIC bool
154xlog_reserveq_wake(
155 struct log *log,
156 int *free_bytes)
157{
158 struct xlog_ticket *tic;
159 int need_bytes;
160
161 list_for_each_entry(tic, &log->l_reserveq, t_queue) {
162 if (tic->t_flags & XLOG_TIC_PERM_RESERV)
163 need_bytes = tic->t_unit_res * tic->t_cnt;
164 else
165 need_bytes = tic->t_unit_res;
166
167 if (*free_bytes < need_bytes)
168 return false;
169 *free_bytes -= need_bytes;
170
171 trace_xfs_log_grant_wake_up(log, tic);
172 wake_up(&tic->t_wait);
173 }
174
175 return true;
176}
177
178STATIC bool
179xlog_writeq_wake(
180 struct log *log,
181 int *free_bytes)
182{
183 struct xlog_ticket *tic;
184 int need_bytes;
185
186 list_for_each_entry(tic, &log->l_writeq, t_queue) {
187 ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
188
189 need_bytes = tic->t_unit_res;
190
191 if (*free_bytes < need_bytes)
192 return false;
193 *free_bytes -= need_bytes;
194
195 trace_xfs_log_regrant_write_wake_up(log, tic);
196 wake_up(&tic->t_wait);
197 }
198
199 return true;
200}
201
202STATIC int
203xlog_reserveq_wait(
204 struct log *log,
205 struct xlog_ticket *tic,
206 int need_bytes)
207{
208 list_add_tail(&tic->t_queue, &log->l_reserveq);
209
210 do {
211 if (XLOG_FORCED_SHUTDOWN(log))
212 goto shutdown;
213 xlog_grant_push_ail(log, need_bytes);
214
215 XFS_STATS_INC(xs_sleep_logspace);
216 trace_xfs_log_grant_sleep(log, tic);
217
218 xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
219 trace_xfs_log_grant_wake(log, tic);
220
221 spin_lock(&log->l_grant_reserve_lock);
222 if (XLOG_FORCED_SHUTDOWN(log))
223 goto shutdown;
224 } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);
225
226 list_del_init(&tic->t_queue);
227 return 0;
228shutdown:
229 list_del_init(&tic->t_queue);
230 return XFS_ERROR(EIO);
231}
232
233STATIC int
234xlog_writeq_wait(
235 struct log *log,
236 struct xlog_ticket *tic,
237 int need_bytes)
238{
239 list_add_tail(&tic->t_queue, &log->l_writeq);
240
241 do {
242 if (XLOG_FORCED_SHUTDOWN(log))
243 goto shutdown;
244 xlog_grant_push_ail(log, need_bytes);
245
246 XFS_STATS_INC(xs_sleep_logspace);
247 trace_xfs_log_regrant_write_sleep(log, tic);
248
249 xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
250 trace_xfs_log_regrant_write_wake(log, tic);
251
252 spin_lock(&log->l_grant_write_lock);
253 if (XLOG_FORCED_SHUTDOWN(log))
254 goto shutdown;
255 } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);
256
257 list_del_init(&tic->t_queue);
258 return 0;
259shutdown:
260 list_del_init(&tic->t_queue);
261 return XFS_ERROR(EIO);
262}
263
153static void 264static void
154xlog_tic_reset_res(xlog_ticket_t *tic) 265xlog_tic_reset_res(xlog_ticket_t *tic)
155{ 266{
@@ -350,8 +461,19 @@ xfs_log_reserve(
350 retval = xlog_grant_log_space(log, internal_ticket); 461 retval = xlog_grant_log_space(log, internal_ticket);
351 } 462 }
352 463
464 if (unlikely(retval)) {
465 /*
466 * If we are failing, make sure the ticket doesn't have any
467 * current reservations. We don't want to add this back
468 * when the ticket/ transaction gets cancelled.
469 */
470 internal_ticket->t_curr_res = 0;
471 /* ungrant will give back unit_res * t_cnt. */
472 internal_ticket->t_cnt = 0;
473 }
474
353 return retval; 475 return retval;
354} /* xfs_log_reserve */ 476}
355 477
356 478
357/* 479/*
@@ -626,7 +748,7 @@ xfs_log_item_init(
626 struct xfs_mount *mp, 748 struct xfs_mount *mp,
627 struct xfs_log_item *item, 749 struct xfs_log_item *item,
628 int type, 750 int type,
629 struct xfs_item_ops *ops) 751 const struct xfs_item_ops *ops)
630{ 752{
631 item->li_mountp = mp; 753 item->li_mountp = mp;
632 item->li_ailp = mp->m_ail; 754 item->li_ailp = mp->m_ail;
@@ -2481,8 +2603,8 @@ restart:
2481/* 2603/*
2482 * Atomically get the log space required for a log ticket. 2604 * Atomically get the log space required for a log ticket.
2483 * 2605 *
2484 * Once a ticket gets put onto the reserveq, it will only return after 2606 * Once a ticket gets put onto the reserveq, it will only return after the
2485 * the needed reservation is satisfied. 2607 * needed reservation is satisfied.
2486 * 2608 *
2487 * This function is structured so that it has a lock free fast path. This is 2609 * This function is structured so that it has a lock free fast path. This is
2488 * necessary because every new transaction reservation will come through this 2610 * necessary because every new transaction reservation will come through this
@@ -2490,113 +2612,53 @@ restart:
2490 * every pass. 2612 * every pass.
2491 * 2613 *
2492 * As tickets are only ever moved on and off the reserveq under the 2614 * As tickets are only ever moved on and off the reserveq under the
2493 * l_grant_reserve_lock, we only need to take that lock if we are going 2615 * l_grant_reserve_lock, we only need to take that lock if we are going to add
2494 * to add the ticket to the queue and sleep. We can avoid taking the lock if the 2616 * the ticket to the queue and sleep. We can avoid taking the lock if the ticket
2495 * ticket was never added to the reserveq because the t_queue list head will be 2617 * was never added to the reserveq because the t_queue list head will be empty
2496 * empty and we hold the only reference to it so it can safely be checked 2618 * and we hold the only reference to it so it can safely be checked unlocked.
2497 * unlocked.
2498 */ 2619 */
2499STATIC int 2620STATIC int
2500xlog_grant_log_space(xlog_t *log, 2621xlog_grant_log_space(
2501 xlog_ticket_t *tic) 2622 struct log *log,
2623 struct xlog_ticket *tic)
2502{ 2624{
2503 int free_bytes; 2625 int free_bytes, need_bytes;
2504 int need_bytes; 2626 int error = 0;
2505 2627
2506#ifdef DEBUG 2628 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
2507 if (log->l_flags & XLOG_ACTIVE_RECOVERY)
2508 panic("grant Recovery problem");
2509#endif
2510 2629
2511 trace_xfs_log_grant_enter(log, tic); 2630 trace_xfs_log_grant_enter(log, tic);
2512 2631
2632 /*
2633 * If there are other waiters on the queue then give them a chance at
2634 * logspace before us. Wake up the first waiters, if we do not wake
2635 * up all the waiters then go to sleep waiting for more free space,
2636 * otherwise try to get some space for this transaction.
2637 */
2513 need_bytes = tic->t_unit_res; 2638 need_bytes = tic->t_unit_res;
2514 if (tic->t_flags & XFS_LOG_PERM_RESERV) 2639 if (tic->t_flags & XFS_LOG_PERM_RESERV)
2515 need_bytes *= tic->t_ocnt; 2640 need_bytes *= tic->t_ocnt;
2516
2517 /* something is already sleeping; insert new transaction at end */
2518 if (!list_empty_careful(&log->l_reserveq)) {
2519 spin_lock(&log->l_grant_reserve_lock);
2520 /* recheck the queue now we are locked */
2521 if (list_empty(&log->l_reserveq)) {
2522 spin_unlock(&log->l_grant_reserve_lock);
2523 goto redo;
2524 }
2525 list_add_tail(&tic->t_queue, &log->l_reserveq);
2526
2527 trace_xfs_log_grant_sleep1(log, tic);
2528
2529 /*
2530 * Gotta check this before going to sleep, while we're
2531 * holding the grant lock.
2532 */
2533 if (XLOG_FORCED_SHUTDOWN(log))
2534 goto error_return;
2535
2536 XFS_STATS_INC(xs_sleep_logspace);
2537 xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
2538
2539 /*
2540 * If we got an error, and the filesystem is shutting down,
2541 * we'll catch it down below. So just continue...
2542 */
2543 trace_xfs_log_grant_wake1(log, tic);
2544 }
2545
2546redo:
2547 if (XLOG_FORCED_SHUTDOWN(log))
2548 goto error_return_unlocked;
2549
2550 free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); 2641 free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
2551 if (free_bytes < need_bytes) { 2642 if (!list_empty_careful(&log->l_reserveq)) {
2552 spin_lock(&log->l_grant_reserve_lock); 2643 spin_lock(&log->l_grant_reserve_lock);
2553 if (list_empty(&tic->t_queue)) 2644 if (!xlog_reserveq_wake(log, &free_bytes) ||
2554 list_add_tail(&tic->t_queue, &log->l_reserveq); 2645 free_bytes < need_bytes)
2555 2646 error = xlog_reserveq_wait(log, tic, need_bytes);
2556 trace_xfs_log_grant_sleep2(log, tic); 2647 spin_unlock(&log->l_grant_reserve_lock);
2557 2648 } else if (free_bytes < need_bytes) {
2558 if (XLOG_FORCED_SHUTDOWN(log))
2559 goto error_return;
2560
2561 xlog_grant_push_ail(log, need_bytes);
2562
2563 XFS_STATS_INC(xs_sleep_logspace);
2564 xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
2565
2566 trace_xfs_log_grant_wake2(log, tic);
2567 goto redo;
2568 }
2569
2570 if (!list_empty(&tic->t_queue)) {
2571 spin_lock(&log->l_grant_reserve_lock); 2649 spin_lock(&log->l_grant_reserve_lock);
2572 list_del_init(&tic->t_queue); 2650 error = xlog_reserveq_wait(log, tic, need_bytes);
2573 spin_unlock(&log->l_grant_reserve_lock); 2651 spin_unlock(&log->l_grant_reserve_lock);
2574 } 2652 }
2653 if (error)
2654 return error;
2575 2655
2576 /* we've got enough space */
2577 xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes); 2656 xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
2578 xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); 2657 xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
2579 trace_xfs_log_grant_exit(log, tic); 2658 trace_xfs_log_grant_exit(log, tic);
2580 xlog_verify_grant_tail(log); 2659 xlog_verify_grant_tail(log);
2581 return 0; 2660 return 0;
2582 2661}
2583error_return_unlocked:
2584 spin_lock(&log->l_grant_reserve_lock);
2585error_return:
2586 list_del_init(&tic->t_queue);
2587 spin_unlock(&log->l_grant_reserve_lock);
2588 trace_xfs_log_grant_error(log, tic);
2589
2590 /*
2591 * If we are failing, make sure the ticket doesn't have any
2592 * current reservations. We don't want to add this back when
2593 * the ticket/transaction gets cancelled.
2594 */
2595 tic->t_curr_res = 0;
2596 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
2597 return XFS_ERROR(EIO);
2598} /* xlog_grant_log_space */
2599
2600 2662
2601/* 2663/*
2602 * Replenish the byte reservation required by moving the grant write head. 2664 * Replenish the byte reservation required by moving the grant write head.
@@ -2605,10 +2667,12 @@ error_return:
2605 * free fast path. 2667 * free fast path.
2606 */ 2668 */
2607STATIC int 2669STATIC int
2608xlog_regrant_write_log_space(xlog_t *log, 2670xlog_regrant_write_log_space(
2609 xlog_ticket_t *tic) 2671 struct log *log,
2672 struct xlog_ticket *tic)
2610{ 2673{
2611 int free_bytes, need_bytes; 2674 int free_bytes, need_bytes;
2675 int error = 0;
2612 2676
2613 tic->t_curr_res = tic->t_unit_res; 2677 tic->t_curr_res = tic->t_unit_res;
2614 xlog_tic_reset_res(tic); 2678 xlog_tic_reset_res(tic);
@@ -2616,104 +2680,38 @@ xlog_regrant_write_log_space(xlog_t *log,
2616 if (tic->t_cnt > 0) 2680 if (tic->t_cnt > 0)
2617 return 0; 2681 return 0;
2618 2682
2619#ifdef DEBUG 2683 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
2620 if (log->l_flags & XLOG_ACTIVE_RECOVERY)
2621 panic("regrant Recovery problem");
2622#endif
2623 2684
2624 trace_xfs_log_regrant_write_enter(log, tic); 2685 trace_xfs_log_regrant_write_enter(log, tic);
2625 if (XLOG_FORCED_SHUTDOWN(log))
2626 goto error_return_unlocked;
2627 2686
2628 /* If there are other waiters on the queue then give them a 2687 /*
2629 * chance at logspace before us. Wake up the first waiters, 2688 * If there are other waiters on the queue then give them a chance at
2630 * if we do not wake up all the waiters then go to sleep waiting 2689 * logspace before us. Wake up the first waiters, if we do not wake
2631 * for more free space, otherwise try to get some space for 2690 * up all the waiters then go to sleep waiting for more free space,
2632 * this transaction. 2691 * otherwise try to get some space for this transaction.
2633 */ 2692 */
2634 need_bytes = tic->t_unit_res; 2693 need_bytes = tic->t_unit_res;
2635 if (!list_empty_careful(&log->l_writeq)) {
2636 struct xlog_ticket *ntic;
2637
2638 spin_lock(&log->l_grant_write_lock);
2639 free_bytes = xlog_space_left(log, &log->l_grant_write_head);
2640 list_for_each_entry(ntic, &log->l_writeq, t_queue) {
2641 ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
2642
2643 if (free_bytes < ntic->t_unit_res)
2644 break;
2645 free_bytes -= ntic->t_unit_res;
2646 wake_up(&ntic->t_wait);
2647 }
2648
2649 if (ntic != list_first_entry(&log->l_writeq,
2650 struct xlog_ticket, t_queue)) {
2651 if (list_empty(&tic->t_queue))
2652 list_add_tail(&tic->t_queue, &log->l_writeq);
2653 trace_xfs_log_regrant_write_sleep1(log, tic);
2654
2655 xlog_grant_push_ail(log, need_bytes);
2656
2657 XFS_STATS_INC(xs_sleep_logspace);
2658 xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
2659 trace_xfs_log_regrant_write_wake1(log, tic);
2660 } else
2661 spin_unlock(&log->l_grant_write_lock);
2662 }
2663
2664redo:
2665 if (XLOG_FORCED_SHUTDOWN(log))
2666 goto error_return_unlocked;
2667
2668 free_bytes = xlog_space_left(log, &log->l_grant_write_head); 2694 free_bytes = xlog_space_left(log, &log->l_grant_write_head);
2669 if (free_bytes < need_bytes) { 2695 if (!list_empty_careful(&log->l_writeq)) {
2670 spin_lock(&log->l_grant_write_lock); 2696 spin_lock(&log->l_grant_write_lock);
2671 if (list_empty(&tic->t_queue)) 2697 if (!xlog_writeq_wake(log, &free_bytes) ||
2672 list_add_tail(&tic->t_queue, &log->l_writeq); 2698 free_bytes < need_bytes)
2673 2699 error = xlog_writeq_wait(log, tic, need_bytes);
2674 if (XLOG_FORCED_SHUTDOWN(log)) 2700 spin_unlock(&log->l_grant_write_lock);
2675 goto error_return; 2701 } else if (free_bytes < need_bytes) {
2676
2677 xlog_grant_push_ail(log, need_bytes);
2678
2679 XFS_STATS_INC(xs_sleep_logspace);
2680 trace_xfs_log_regrant_write_sleep2(log, tic);
2681 xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
2682
2683 trace_xfs_log_regrant_write_wake2(log, tic);
2684 goto redo;
2685 }
2686
2687 if (!list_empty(&tic->t_queue)) {
2688 spin_lock(&log->l_grant_write_lock); 2702 spin_lock(&log->l_grant_write_lock);
2689 list_del_init(&tic->t_queue); 2703 error = xlog_writeq_wait(log, tic, need_bytes);
2690 spin_unlock(&log->l_grant_write_lock); 2704 spin_unlock(&log->l_grant_write_lock);
2691 } 2705 }
2692 2706
2693 /* we've got enough space */ 2707 if (error)
2708 return error;
2709
2694 xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); 2710 xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
2695 trace_xfs_log_regrant_write_exit(log, tic); 2711 trace_xfs_log_regrant_write_exit(log, tic);
2696 xlog_verify_grant_tail(log); 2712 xlog_verify_grant_tail(log);
2697 return 0; 2713 return 0;
2698 2714}
2699
2700 error_return_unlocked:
2701 spin_lock(&log->l_grant_write_lock);
2702 error_return:
2703 list_del_init(&tic->t_queue);
2704 spin_unlock(&log->l_grant_write_lock);
2705 trace_xfs_log_regrant_write_error(log, tic);
2706
2707 /*
2708 * If we are failing, make sure the ticket doesn't have any
2709 * current reservations. We don't want to add this back when
2710 * the ticket/transaction gets cancelled.
2711 */
2712 tic->t_curr_res = 0;
2713 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
2714 return XFS_ERROR(EIO);
2715} /* xlog_regrant_write_log_space */
2716
2717 2715
2718/* The first cnt-1 times through here we don't need to 2716/* The first cnt-1 times through here we don't need to
2719 * move the grant write head because the permanent 2717 * move the grant write head because the permanent
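
The xfs_log.c rework above factors the open-coded grant loops into paired wake/wait helpers: a wake pass hands out whatever free space it can to already-queued tickets and reports whether every waiter was satisfied, and the caller queues itself and sleeps only if someone could not be woken or there is not enough space left for its own reservation. A simplified, single-threaded model of the wake pass (plain C, hypothetical ticket structure, no locking or real sleeping):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical queued reservation ticket. */
    struct ticket {
        const char *name;
        int need_bytes;     /* space this waiter still needs */
        bool woken;
    };

    /*
     * Walk the queue in order and "wake" every ticket whose reservation
     * fits in the remaining free space, consuming that space as we go.
     * Return false as soon as a waiter cannot be satisfied, mirroring the
     * contract of the new xlog_reserveq_wake()/xlog_writeq_wake(): if this
     * returns false, the caller must queue up and wait as well.
     */
    static bool queue_wake(struct ticket *q, int nticks, int *free_bytes)
    {
        for (int i = 0; i < nticks; i++) {
            if (*free_bytes < q[i].need_bytes)
                return false;
            *free_bytes -= q[i].need_bytes;
            q[i].woken = true;
            printf("woke %s (%d bytes)\n", q[i].name, q[i].need_bytes);
        }
        return true;
    }

    int main(void)
    {
        struct ticket q[] = {
            { "t1", 400, false },
            { "t2", 300, false },
            { "t3", 500, false },
        };
        int free_bytes = 800;

        if (!queue_wake(q, 3, &free_bytes))
            printf("not everyone fits; new arrivals must wait\n");
        printf("free bytes left: %d\n", free_bytes);
        return 0;
    }
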
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 78c9039994af..3f7bf451c034 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -137,7 +137,7 @@ struct xfs_trans;
137void xfs_log_item_init(struct xfs_mount *mp, 137void xfs_log_item_init(struct xfs_mount *mp,
138 struct xfs_log_item *item, 138 struct xfs_log_item *item,
139 int type, 139 int type,
140 struct xfs_item_ops *ops); 140 const struct xfs_item_ops *ops);
141 141
142xfs_lsn_t xfs_log_done(struct xfs_mount *mp, 142xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
143 struct xlog_ticket *ticket, 143 struct xlog_ticket *ticket,
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 5cff443f6cdb..0bbb1a41998b 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -674,7 +674,8 @@ xfs_qm_dqattach_one(
674 * disk and we didn't ask it to allocate; 674 * disk and we didn't ask it to allocate;
675 * ESRCH if quotas got turned off suddenly. 675 * ESRCH if quotas got turned off suddenly.
676 */ 676 */
677 error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp); 677 error = xfs_qm_dqget(ip->i_mount, ip, id, type,
678 doalloc | XFS_QMOPT_DOWARN, &dqp);
678 if (error) 679 if (error)
679 return error; 680 return error;
680 681
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index aa3dc1a4d53d..be5c51d8f757 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -770,6 +770,17 @@ restart:
770 if (!xfs_iflock_nowait(ip)) { 770 if (!xfs_iflock_nowait(ip)) {
771 if (!(sync_mode & SYNC_WAIT)) 771 if (!(sync_mode & SYNC_WAIT))
772 goto out; 772 goto out;
773
774 /*
775 * If we only have a single dirty inode in a cluster there is
776 * a fair chance that the AIL push may have pushed it into
777 * the buffer, but xfsbufd won't touch it until 30 seconds
778 * from now, and thus we will lock up here.
779 *
780 * Promote the inode buffer to the front of the delwri list
781 * and wake up xfsbufd now.
782 */
783 xfs_promote_inode(ip);
773 xfs_iflock(ip); 784 xfs_iflock(ip);
774 } 785 }
775 786
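
xfs_promote_inode() (added in xfs_inode.c earlier in this diff) only acts when the backing buffer is already delayed-write: it moves that buffer to the front of the delwri queue and kicks the flusher thread, so the caller is not stuck behind the roughly 30-second xfsbufd interval described in the comment above. A minimal sketch of the move-to-front idea on a plain doubly linked list (hypothetical types; the real code uses the kernel's list_head and wakes a kthread):

    #include <stdio.h>

    /* Minimal circular doubly linked list, in the spirit of list_head. */
    struct node {
        struct node *prev, *next;
        const char *name;
    };

    static void list_init(struct node *head)
    {
        head->prev = head->next = head;
    }

    static void list_add_tail(struct node *n, struct node *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    static void list_del(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    /* Promote: unlink the buffer and re-add it right behind the head so
     * the flusher picks it up first on its next pass. */
    static void promote(struct node *n, struct node *head)
    {
        list_del(n);
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
    }

    int main(void)
    {
        struct node head;
        struct node a = { .name = "bufA" }, b = { .name = "bufB" },
                    c = { .name = "bufC" };

        list_init(&head);
        list_add_tail(&a, &head);
        list_add_tail(&b, &head);
        list_add_tail(&c, &head);

        promote(&c, &head);     /* the buffer we are about to wait on */

        for (struct node *p = head.next; p != &head; p = p->next)
            printf("%s ", p->name);
        printf("\n");           /* prints: bufC bufA bufB */
        return 0;
    }
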
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index f1d2802b2f07..494035798873 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -834,18 +834,14 @@ DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
834DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter); 834DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
835DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit); 835DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
836DEFINE_LOGGRANT_EVENT(xfs_log_grant_error); 836DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
837DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1); 837DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
838DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1); 838DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
839DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
840DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
841DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up); 839DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
842DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter); 840DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
843DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit); 841DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
844DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error); 842DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
845DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1); 843DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep);
846DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1); 844DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake);
847DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
848DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
849DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up); 845DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
850DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter); 846DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
851DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit); 847DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 603f3eb52041..3ae713c0abd9 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -326,7 +326,7 @@ typedef struct xfs_log_item {
326 struct xfs_log_item *); 326 struct xfs_log_item *);
327 /* buffer item iodone */ 327 /* buffer item iodone */
328 /* callback func */ 328 /* callback func */
329 struct xfs_item_ops *li_ops; /* function list */ 329 const struct xfs_item_ops *li_ops; /* function list */
330 330
331 /* delayed logging */ 331 /* delayed logging */
332 struct list_head li_cil; /* CIL pointers */ 332 struct list_head li_cil; /* CIL pointers */
@@ -341,7 +341,7 @@ typedef struct xfs_log_item {
341 { XFS_LI_IN_AIL, "IN_AIL" }, \ 341 { XFS_LI_IN_AIL, "IN_AIL" }, \
342 { XFS_LI_ABORTED, "ABORTED" } 342 { XFS_LI_ABORTED, "ABORTED" }
343 343
344typedef struct xfs_item_ops { 344struct xfs_item_ops {
345 uint (*iop_size)(xfs_log_item_t *); 345 uint (*iop_size)(xfs_log_item_t *);
346 void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *); 346 void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
347 void (*iop_pin)(xfs_log_item_t *); 347 void (*iop_pin)(xfs_log_item_t *);
@@ -352,7 +352,7 @@ typedef struct xfs_item_ops {
352 void (*iop_push)(xfs_log_item_t *); 352 void (*iop_push)(xfs_log_item_t *);
353 bool (*iop_pushbuf)(xfs_log_item_t *); 353 bool (*iop_pushbuf)(xfs_log_item_t *);
354 void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); 354 void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
355} xfs_item_ops_t; 355};
356 356
357#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip) 357#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip)
358#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp) 358#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp)
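
With the typedef dropped, every log item now points at a const struct of function pointers, so the shared ops vectors throughout this series can live in read-only data and cannot be modified at runtime. A small self-contained sketch of that const ops-vector dispatch pattern (names are illustrative, not the XFS ones):

    #include <stdio.h>

    struct item;

    /* Shared operations vector: one const instance per item type. */
    struct item_ops {
        unsigned int (*size)(const struct item *);
        void         (*format)(const struct item *);
    };

    struct item {
        const char *name;
        unsigned int nbytes;
        const struct item_ops *ops;   /* like li_ops, now const */
    };

    static unsigned int buf_item_size(const struct item *it)
    {
        return it->nbytes;
    }

    static void buf_item_format(const struct item *it)
    {
        printf("formatting %s (%u bytes)\n", it->name, it->nbytes);
    }

    /* Because the vector is const, the compiler can place it in .rodata
     * and accidental writes through it are rejected at compile time. */
    static const struct item_ops buf_item_ops = {
        .size   = buf_item_size,
        .format = buf_item_format,
    };

    /* Dispatch helpers in the spirit of the IOP_SIZE()/IOP_FORMAT() macros. */
    #define ITEM_SIZE(it)   ((it)->ops->size(it))
    #define ITEM_FORMAT(it) ((it)->ops->format(it))

    int main(void)
    {
        struct item it = { "buf item", 128, &buf_item_ops };

        printf("size = %u\n", ITEM_SIZE(&it));
        ITEM_FORMAT(&it);
        return 0;
    }
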
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 4ecf2a549060..ce9268a2f56b 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -112,7 +112,7 @@ xfs_readlink(
112 char *link) 112 char *link)
113{ 113{
114 xfs_mount_t *mp = ip->i_mount; 114 xfs_mount_t *mp = ip->i_mount;
115 int pathlen; 115 xfs_fsize_t pathlen;
116 int error = 0; 116 int error = 0;
117 117
118 trace_xfs_readlink(ip); 118 trace_xfs_readlink(ip);
@@ -122,13 +122,19 @@ xfs_readlink(
122 122
123 xfs_ilock(ip, XFS_ILOCK_SHARED); 123 xfs_ilock(ip, XFS_ILOCK_SHARED);
124 124
125 ASSERT(S_ISLNK(ip->i_d.di_mode));
126 ASSERT(ip->i_d.di_size <= MAXPATHLEN);
127
128 pathlen = ip->i_d.di_size; 125 pathlen = ip->i_d.di_size;
129 if (!pathlen) 126 if (!pathlen)
130 goto out; 127 goto out;
131 128
129 if (pathlen < 0 || pathlen > MAXPATHLEN) {
130 xfs_alert(mp, "%s: inode (%llu) bad symlink length (%lld)",
131 __func__, (unsigned long long) ip->i_ino,
132 (long long) pathlen);
133 ASSERT(0);
134 return XFS_ERROR(EFSCORRUPTED);
135 }
136
137
132 if (ip->i_df.if_flags & XFS_IFINLINE) { 138 if (ip->i_df.if_flags & XFS_IFINLINE) {
133 memcpy(link, ip->i_df.if_u1.if_data, pathlen); 139 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
134 link[pathlen] = '\0'; 140 link[pathlen] = '\0';
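
The readlink change replaces DEBUG-only ASSERTs with a runtime range check, so a corrupted on-disk symlink length is reported as EFSCORRUPTED on production builds instead of being trusted and copied past the caller's buffer. A hedged sketch of the same validate-then-copy shape (standalone C; the MAXPATHLEN stand-in and the use of EUCLEAN, which XFS maps EFSCORRUPTED onto, are placeholders):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MY_MAXPATHLEN 1024   /* placeholder for the real limit */

    /*
     * Copy an on-disk symlink target into the caller's buffer, but only
     * after checking that the recorded length is sane.  A negative or
     * oversized length means the metadata is corrupt, so fail loudly
     * instead of trusting it.
     */
    static int read_symlink(const char *disk_data, int64_t disk_len,
                            char *link, size_t linksize)
    {
        if (disk_len == 0)
            return 0;            /* nothing to copy */

        if (disk_len < 0 || disk_len > MY_MAXPATHLEN ||
            (size_t)disk_len >= linksize) {
            fprintf(stderr, "bad symlink length %lld\n",
                    (long long)disk_len);
            return -EUCLEAN;     /* stand-in for EFSCORRUPTED */
        }

        memcpy(link, disk_data, (size_t)disk_len);
        link[disk_len] = '\0';
        return 0;
    }

    int main(void)
    {
        char buf[MY_MAXPATHLEN + 1];

        if (read_symlink("target/file", 11, buf, sizeof(buf)) == 0)
            printf("ok: %s\n", buf);

        if (read_symlink("junk", -5, buf, sizeof(buf)) != 0)
            printf("corrupt length rejected\n");
        return 0;
    }
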
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index f4c38d8c6674..2292d1af9d70 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -685,9 +685,15 @@ __SYSCALL(__NR_syncfs, sys_syncfs)
685__SYSCALL(__NR_setns, sys_setns) 685__SYSCALL(__NR_setns, sys_setns)
686#define __NR_sendmmsg 269 686#define __NR_sendmmsg 269
687__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg) 687__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
688#define __NR_process_vm_readv 270
689__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
690 compat_sys_process_vm_readv)
691#define __NR_process_vm_writev 271
692__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
693 compat_sys_process_vm_writev)
688 694
689#undef __NR_syscalls 695#undef __NR_syscalls
690#define __NR_syscalls 270 696#define __NR_syscalls 272
691 697
692/* 698/*
693 * All syscalls below here should go away really, 699 * All syscalls below here should go away really,
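
For context on the two new syscall numbers: process_vm_readv()/process_vm_writev() copy data directly between the address spaces of two cooperating processes. A minimal usage sketch (it reads this process's own memory so it runs without extra privileges, and assumes a glibc new enough to expose the wrapper in <sys/uio.h>):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        char src[] = "hello from the remote side";
        char dst[sizeof(src)];
        struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
        struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

        /* Normally pid would be another (ptrace-able) process; using
         * our own pid keeps the example self-contained. */
        ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
        if (n < 0) {
            perror("process_vm_readv");
            return 1;
        }
        printf("copied %zd bytes: %s\n", n, dst);
        return 0;
    }
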
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 1f630a5d75b4..ecd5984ef689 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -990,7 +990,9 @@ struct drm_minor {
990 struct proc_dir_entry *proc_root; /**< proc directory entry */ 990 struct proc_dir_entry *proc_root; /**< proc directory entry */
991 struct drm_info_node proc_nodes; 991 struct drm_info_node proc_nodes;
992 struct dentry *debugfs_root; 992 struct dentry *debugfs_root;
993 struct drm_info_node debugfs_nodes; 993
994 struct list_head debugfs_list;
995 struct mutex debugfs_lock; /* Protects debugfs_list. */
994 996
995 struct drm_master *master; /* currently active master for this node */ 997 struct drm_master *master; /* currently active master for this node */
996 struct list_head master_list; 998 struct list_head master_list;
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 966fe7daec6f..2a2acda8b437 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -299,6 +299,8 @@ struct drm_mode_fb_cmd2 {
299#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 299#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
300#define DRM_MODE_FB_DIRTY_FLAGS 0x03 300#define DRM_MODE_FB_DIRTY_FLAGS 0x03
301 301
302#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
303
302/* 304/*
303 * Mark a region of a framebuffer as dirty. 305 * Mark a region of a framebuffer as dirty.
304 * 306 *
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 3d53efd25ab9..14b6cd022284 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -4,6 +4,7 @@
4*/ 4*/
5#define radeon_PCI_IDS \ 5#define radeon_PCI_IDS \
6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
7 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
7 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 8 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
8 {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 9 {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
9 {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 10 {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -55,6 +56,7 @@
55 {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ 56 {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
56 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ 57 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
57 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ 58 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
59 {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
58 {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 60 {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
59 {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 61 {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
60 {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 62 {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
@@ -180,8 +182,11 @@
180 {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 182 {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
181 {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 183 {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
182 {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 184 {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
185 {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
183 {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 186 {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
184 {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 187 {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
188 {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
189 {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
185 {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 190 {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
186 {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 191 {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
187 {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 192 {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -193,8 +198,18 @@
193 {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 198 {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
194 {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 199 {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
195 {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 200 {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
201 {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
196 {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 202 {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
197 {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 203 {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
204 {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
205 {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
206 {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
207 {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
208 {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
209 {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
210 {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
211 {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
212 {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
198 {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 213 {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
199 {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ 214 {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
200 {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ 215 {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
@@ -236,6 +251,7 @@
236 {0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ 251 {0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
237 {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ 252 {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
238 {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ 253 {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
254 {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
239 {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ 255 {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
240 {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ 256 {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
241 {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 257 {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -478,6 +494,8 @@
478 {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ 494 {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
479 {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ 495 {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
480 {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 496 {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
497 {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
498 {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
481 {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ 499 {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
482 {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ 500 {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
483 {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 501 {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
@@ -492,6 +510,8 @@
492 {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 510 {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
493 {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 511 {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
494 {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 512 {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
513 {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
514 {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
495 {0, 0, 0} 515 {0, 0, 0}
496 516
497#define r128_PCI_IDS \ 517#define r128_PCI_IDS \
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 874c4d271328..12050434d57a 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -32,13 +32,14 @@
32/** 32/**
33 * User-desired buffer creation information structure. 33 * User-desired buffer creation information structure.
34 * 34 *
35 * @size: requested size for the object. 35 * @size: user-desired memory allocation size.
36 * - this size value would be page-aligned internally. 36 * - this size value would be page-aligned internally.
37 * @flags: user request for setting memory type or cache attributes. 37 * @flags: user request for setting memory type or cache attributes.
38 * @handle: returned handle for the object. 38 * @handle: returned a handle to created gem object.
39 * - this handle will be set by gem module of kernel side.
39 */ 40 */
40struct drm_exynos_gem_create { 41struct drm_exynos_gem_create {
41 unsigned int size; 42 uint64_t size;
42 unsigned int flags; 43 unsigned int flags;
43 unsigned int handle; 44 unsigned int handle;
44}; 45};
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index b65be6054a18..be94be6d6f17 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -874,6 +874,10 @@ struct drm_radeon_gem_pwrite {
874 874
875#define RADEON_CHUNK_ID_RELOCS 0x01 875#define RADEON_CHUNK_ID_RELOCS 0x01
876#define RADEON_CHUNK_ID_IB 0x02 876#define RADEON_CHUNK_ID_IB 0x02
877#define RADEON_CHUNK_ID_FLAGS 0x03
878
879/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
880#define RADEON_CS_KEEP_TILING_FLAGS 0x01
877 881
878struct drm_radeon_cs_chunk { 882struct drm_radeon_cs_chunk {
879 uint32_t chunk_id; 883 uint32_t chunk_id;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index a3c071c9e189..847994aef0e9 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -211,8 +211,8 @@ extern void bio_pair_release(struct bio_pair *dbio);
211extern struct bio_set *bioset_create(unsigned int, unsigned int); 211extern struct bio_set *bioset_create(unsigned int, unsigned int);
212extern void bioset_free(struct bio_set *); 212extern void bioset_free(struct bio_set *);
213 213
214extern struct bio *bio_alloc(gfp_t, int); 214extern struct bio *bio_alloc(gfp_t, unsigned int);
215extern struct bio *bio_kmalloc(gfp_t, int); 215extern struct bio *bio_kmalloc(gfp_t, unsigned int);
216extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); 216extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
217extern void bio_put(struct bio *); 217extern void bio_put(struct bio *);
218extern void bio_free(struct bio *, struct bio_set *); 218extern void bio_free(struct bio *, struct bio_set *);
@@ -519,7 +519,11 @@ extern void bio_integrity_init(void);
519#define bioset_integrity_create(a, b) (0) 519#define bioset_integrity_create(a, b) (0)
520#define bio_integrity_prep(a) (0) 520#define bio_integrity_prep(a) (0)
521#define bio_integrity_enabled(a) (0) 521#define bio_integrity_enabled(a) (0)
522#define bio_integrity_clone(a, b, c, d) (0) 522static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
523 gfp_t gfp_mask, struct bio_set *bs)
524{
525 return 0;
526}
523#define bioset_integrity_free(a) do { } while (0) 527#define bioset_integrity_free(a) do { } while (0)
524#define bio_integrity_free(a, b) do { } while (0) 528#define bio_integrity_free(a, b) do { } while (0)
525#define bio_integrity_endio(a, b) do { } while (0) 529#define bio_integrity_endio(a, b) do { } while (0)
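
The bio.h hunk swaps a function-like macro stub for a static inline so the disabled-configuration path still type-checks its arguments and avoids set-but-unused warnings at call sites, while still compiling down to nothing. A small standalone illustration of that stub pattern (generic names, not the block-layer ones):

    #include <stdio.h>

    /* Imagine FEATURE_ENABLED is a build-time configuration switch. */
    #define FEATURE_ENABLED 0

    struct widget { int id; };

    #if FEATURE_ENABLED
    int widget_register(struct widget *w, int flags);  /* real version elsewhere */
    #else
    /*
     * Disabled build: a static inline stub rather than
     * "#define widget_register(w, f) (0)".  The inline keeps argument
     * type checking, avoids unused-variable warnings at call sites,
     * and still optimises away entirely.
     */
    static inline int widget_register(struct widget *w, int flags)
    {
        (void)w;
        (void)flags;
        return 0;
    }
    #endif

    int main(void)
    {
        struct widget w = { 42 };

        if (widget_register(&w, 0) == 0)
            printf("registered (or stubbed out) widget %d\n", w.id);
        return 0;
    }
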
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c7a6d3b5bc7b..94acd8172b5b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -805,9 +805,6 @@ extern void blk_unprep_request(struct request *);
805 */ 805 */
806extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, 806extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
807 spinlock_t *lock, int node_id); 807 spinlock_t *lock, int node_id);
808extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
809 request_fn_proc *,
810 spinlock_t *, int node_id);
811extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); 808extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
812extern struct request_queue *blk_init_allocated_queue(struct request_queue *, 809extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
813 request_fn_proc *, spinlock_t *); 810 request_fn_proc *, spinlock_t *);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index f88eacb111d4..7c05ac202d90 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -10,6 +10,12 @@
10#include "osdmap.h" 10#include "osdmap.h"
11#include "messenger.h" 11#include "messenger.h"
12 12
13/*
14 * Maximum object name size
15 * (must be at least as big as RBD_MAX_MD_NAME_LEN -- currently 100)
16 */
17#define MAX_OBJ_NAME_SIZE 100
18
13struct ceph_msg; 19struct ceph_msg;
14struct ceph_snap_context; 20struct ceph_snap_context;
15struct ceph_osd_request; 21struct ceph_osd_request;
@@ -75,7 +81,7 @@ struct ceph_osd_request {
75 struct inode *r_inode; /* for use by callbacks */ 81 struct inode *r_inode; /* for use by callbacks */
76 void *r_priv; /* ditto */ 82 void *r_priv; /* ditto */
77 83
78 char r_oid[40]; /* object name */ 84 char r_oid[MAX_OBJ_NAME_SIZE]; /* object name */
79 int r_oid_len; 85 int r_oid_len;
80 unsigned long r_stamp; /* send OR check time */ 86 unsigned long r_stamp; /* send OR check time */
81 87
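
The ceph header replaces the hard-coded 40-byte object-name buffer with a named constant sized for the longest name a client can generate (the comment ties it to RBD_MAX_MD_NAME_LEN). A small sketch of sizing a buffer from a named constant and catching truncation when building a name (the structure and helper are illustrative, not the ceph code):

    #include <stdio.h>

    /* Must be at least as big as the longest generated name, including
     * the terminating NUL. */
    #define MAX_OBJ_NAME_SIZE 100

    struct request {
        char oid[MAX_OBJ_NAME_SIZE];  /* object name */
        int oid_len;
    };

    /* Build "<image>.<block>" into the fixed buffer, refusing names that
     * would not fit rather than silently truncating them. */
    static int set_object_name(struct request *req, const char *image,
                               unsigned long long block)
    {
        int n = snprintf(req->oid, sizeof(req->oid), "%s.%012llx",
                         image, block);
        if (n < 0 || n >= (int)sizeof(req->oid))
            return -1;           /* would have been truncated */
        req->oid_len = n;
        return 0;
    }

    int main(void)
    {
        struct request req;

        if (set_object_name(&req, "rbd_image", 0x2aULL) == 0)
            printf("oid = %s (len %d)\n", req.oid, req.oid_len);
        return 0;
    }
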
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 139c4db55f17..c86c940d1de3 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -156,6 +156,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
156 * @mult: cycle to nanosecond multiplier 156 * @mult: cycle to nanosecond multiplier
157 * @shift: cycle to nanosecond divisor (power of two) 157 * @shift: cycle to nanosecond divisor (power of two)
158 * @max_idle_ns: max idle time permitted by the clocksource (nsecs) 158 * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
159 * @maxadj maximum adjustment value to mult (~11%)
159 * @flags: flags describing special properties 160 * @flags: flags describing special properties
160 * @archdata: arch-specific data 161 * @archdata: arch-specific data
161 * @suspend: suspend function for the clocksource, if necessary 162 * @suspend: suspend function for the clocksource, if necessary
@@ -172,7 +173,7 @@ struct clocksource {
172 u32 mult; 173 u32 mult;
173 u32 shift; 174 u32 shift;
174 u64 max_idle_ns; 175 u64 max_idle_ns;
175 176 u32 maxadj;
176#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA 177#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
177 struct arch_clocksource_data archdata; 178 struct arch_clocksource_data archdata;
178#endif 179#endif
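
The new maxadj field caps how far frequency correction may push the cycle-to-nanosecond multiplier, so mult can never be adjusted to a value whose conversion math overflows; per the kernel-doc comment the bound is on the order of 11% of mult. A hedged arithmetic sketch of deriving and applying such a cap (the values and helper names are illustrative only; the exact in-kernel derivation may differ):

    #include <stdint.h>
    #include <stdio.h>

    /* Roughly 11% of the nominal multiplier, per the header comment;
     * the real derivation lives in the clocksource core. */
    static uint32_t max_adjustment(uint32_t mult)
    {
        return (uint32_t)(((uint64_t)mult * 11) / 100);
    }

    /* Clamp a requested multiplier tweak to +/- maxadj. */
    static uint32_t adjust_mult(uint32_t mult, int64_t delta, uint32_t maxadj)
    {
        if (delta > (int64_t)maxadj)
            delta = maxadj;
        if (delta < -(int64_t)maxadj)
            delta = -(int64_t)maxadj;
        return (uint32_t)((int64_t)mult + delta);
    }

    int main(void)
    {
        uint32_t mult = 4194304;          /* example: 2^22 */
        uint32_t maxadj = max_adjustment(mult);

        printf("maxadj     = %u\n", maxadj);
        printf("mild tweak = %u\n", adjust_mult(mult, 1000, maxadj));
        printf("wild tweak = %u\n", adjust_mult(mult, 10000000, maxadj));
        return 0;
    }
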
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 154bf5683015..66ed067fb729 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -552,5 +552,14 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
552 552
553extern void __user *compat_alloc_user_space(unsigned long len); 553extern void __user *compat_alloc_user_space(unsigned long len);
554 554
555asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
556 const struct compat_iovec __user *lvec,
557 unsigned long liovcnt, const struct compat_iovec __user *rvec,
558 unsigned long riovcnt, unsigned long flags);
559asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
560 const struct compat_iovec __user *lvec,
561 unsigned long liovcnt, const struct compat_iovec __user *rvec,
562 unsigned long riovcnt, unsigned long flags);
563
555#endif /* CONFIG_COMPAT */ 564#endif /* CONFIG_COMPAT */
556#endif /* _LINUX_COMPAT_H */ 565#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 4df926199369..ed9f74f6c519 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -339,7 +339,8 @@ extern int d_validate(struct dentry *, struct dentry *);
339 */ 339 */
340extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); 340extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
341 341
342extern char *__d_path(const struct path *path, struct path *root, char *, int); 342extern char *__d_path(const struct path *, const struct path *, char *, int);
343extern char *d_absolute_path(const struct path *, char *, int);
343extern char *d_path(const struct path *, char *, int); 344extern char *d_path(const struct path *, char *, int);
344extern char *d_path_with_unreachable(const struct path *, char *, int); 345extern char *d_path_with_unreachable(const struct path *, char *, int);
345extern char *dentry_path_raw(struct dentry *, char *, int); 346extern char *dentry_path_raw(struct dentry *, char *, int);
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index afb94583960c..98ce8124b1cc 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -41,7 +41,7 @@ struct devfreq_dev_status {
41 unsigned long total_time; 41 unsigned long total_time;
42 unsigned long busy_time; 42 unsigned long busy_time;
43 unsigned long current_frequency; 43 unsigned long current_frequency;
44 void *private_date; 44 void *private_data;
45}; 45};
46 46
47/** 47/**
diff --git a/include/linux/device.h b/include/linux/device.h
index ffbcf95cd97d..3136ede5a1e1 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -69,7 +69,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
69 * @resume: Called to bring a device on this bus out of sleep mode. 69 * @resume: Called to bring a device on this bus out of sleep mode.
70 * @pm: Power management operations of this bus, callback the specific 70 * @pm: Power management operations of this bus, callback the specific
71 * device driver's pm-ops. 71 * device driver's pm-ops.
72 * @iommu_ops IOMMU specific operations for this bus, used to attach IOMMU 72 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
73 * driver implementations to a bus and allow the driver to do 73 * driver implementations to a bus and allow the driver to do
74 * bus-specific setup 74 * bus-specific setup
75 * @p: The private data of the driver core, only the driver core can 75 * @p: The private data of the driver core, only the driver core can
@@ -682,6 +682,11 @@ static inline bool device_async_suspend_enabled(struct device *dev)
682 return !!dev->power.async_suspend; 682 return !!dev->power.async_suspend;
683} 683}
684 684
685static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
686{
687 dev->power.ignore_children = enable;
688}
689
685static inline void device_lock(struct device *dev) 690static inline void device_lock(struct device *dev)
686{ 691{
687 mutex_lock(&dev->mutex); 692 mutex_lock(&dev->mutex);
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index ef90cbd8e173..57c9a8ae4f2d 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -31,6 +31,7 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);
31extern int iommu_calculate_agaw(struct intel_iommu *iommu); 31extern int iommu_calculate_agaw(struct intel_iommu *iommu);
32extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); 32extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
33extern int dmar_disabled; 33extern int dmar_disabled;
34extern int intel_iommu_enabled;
34#else 35#else
35static inline int iommu_calculate_agaw(struct intel_iommu *iommu) 36static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
36{ 37{
@@ -44,6 +45,7 @@ static inline void free_dmar_iommu(struct intel_iommu *iommu)
44{ 45{
45} 46}
46#define dmar_disabled (1) 47#define dmar_disabled (1)
48#define intel_iommu_enabled (0)
47#endif 49#endif
48 50
49 51
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0c4df261af7e..e0bc4ffb8e7f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -393,8 +393,8 @@ struct inodes_stat_t {
393#include <linux/semaphore.h> 393#include <linux/semaphore.h>
394#include <linux/fiemap.h> 394#include <linux/fiemap.h>
395#include <linux/rculist_bl.h> 395#include <linux/rculist_bl.h>
396#include <linux/shrinker.h>
397#include <linux/atomic.h> 396#include <linux/atomic.h>
397#include <linux/shrinker.h>
398 398
399#include <asm/byteorder.h> 399#include <asm/byteorder.h>
400 400
@@ -1886,6 +1886,7 @@ extern struct dentry *mount_single(struct file_system_type *fs_type,
1886extern struct dentry *mount_nodev(struct file_system_type *fs_type, 1886extern struct dentry *mount_nodev(struct file_system_type *fs_type,
1887 int flags, void *data, 1887 int flags, void *data,
1888 int (*fill_super)(struct super_block *, void *, int)); 1888 int (*fill_super)(struct super_block *, void *, int));
1889extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
1889void generic_shutdown_super(struct super_block *sb); 1890void generic_shutdown_super(struct super_block *sb);
1890void kill_block_super(struct super_block *sb); 1891void kill_block_super(struct super_block *sb);
1891void kill_anon_super(struct super_block *sb); 1892void kill_anon_super(struct super_block *sb);
@@ -1941,6 +1942,7 @@ extern int fd_statfs(int, struct kstatfs *);
1941extern int statfs_by_dentry(struct dentry *, struct kstatfs *); 1942extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
1942extern int freeze_super(struct super_block *super); 1943extern int freeze_super(struct super_block *super);
1943extern int thaw_super(struct super_block *super); 1944extern int thaw_super(struct super_block *super);
1945extern bool our_mnt(struct vfsmount *mnt);
1944 1946
1945extern int current_umask(void); 1947extern int current_umask(void);
1946 1948
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 96efa6794ea5..c3da42dd22ba 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -172,6 +172,7 @@ enum {
172 TRACE_EVENT_FL_FILTERED_BIT, 172 TRACE_EVENT_FL_FILTERED_BIT,
173 TRACE_EVENT_FL_RECORDED_CMD_BIT, 173 TRACE_EVENT_FL_RECORDED_CMD_BIT,
174 TRACE_EVENT_FL_CAP_ANY_BIT, 174 TRACE_EVENT_FL_CAP_ANY_BIT,
175 TRACE_EVENT_FL_NO_SET_FILTER_BIT,
175}; 176};
176 177
177enum { 178enum {
@@ -179,6 +180,7 @@ enum {
179 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), 180 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
180 TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT), 181 TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
181 TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), 182 TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
183 TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
182}; 184};
183 185
184struct ftrace_event_call { 186struct ftrace_event_call {
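
The ftrace change follows the header's existing convention of declaring a bit position in one enum and deriving the (1 << bit) mask in a second, so the two can never drift apart. A tiny standalone example of that flag-definition pattern (generic flag names):

    #include <stdio.h>

    /* Bit positions. */
    enum {
        FL_FILTERED_BIT,
        FL_RECORDED_BIT,
        FL_NO_SET_FILTER_BIT,    /* newly added position */
    };

    /* Masks derived from the positions. */
    enum {
        FL_FILTERED      = (1 << FL_FILTERED_BIT),
        FL_RECORDED      = (1 << FL_RECORDED_BIT),
        FL_NO_SET_FILTER = (1 << FL_NO_SET_FILTER_BIT),
    };

    int main(void)
    {
        unsigned int flags = FL_FILTERED | FL_NO_SET_FILTER;

        if (flags & FL_NO_SET_FILTER)
            printf("filter changes are disallowed (flags=0x%x)\n", flags);
        return 0;
    }
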
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 9de31bc98c88..6d18f3531f18 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -21,8 +21,6 @@
21#define dev_to_part(device) container_of((device), struct hd_struct, __dev) 21#define dev_to_part(device) container_of((device), struct hd_struct, __dev)
22#define disk_to_dev(disk) (&(disk)->part0.__dev) 22#define disk_to_dev(disk) (&(disk)->part0.__dev)
23#define part_to_dev(part) (&((part)->__dev)) 23#define part_to_dev(part) (&((part)->__dev))
24#define alias_name(disk) ((disk)->alias ? (disk)->alias : \
25 (disk)->disk_name)
26 24
27extern struct device_type part_type; 25extern struct device_type part_type;
28extern struct kobject *block_depr; 26extern struct kobject *block_depr;
@@ -60,7 +58,6 @@ enum {
60 58
61#define DISK_MAX_PARTS 256 59#define DISK_MAX_PARTS 256
62#define DISK_NAME_LEN 32 60#define DISK_NAME_LEN 32
63#define ALIAS_LEN 256
64 61
65#include <linux/major.h> 62#include <linux/major.h>
66#include <linux/device.h> 63#include <linux/device.h>
@@ -166,7 +163,6 @@ struct gendisk {
166 * disks that can't be partitioned. */ 163 * disks that can't be partitioned. */
167 164
168 char disk_name[DISK_NAME_LEN]; /* name of major driver */ 165 char disk_name[DISK_NAME_LEN]; /* name of major driver */
169 char *alias; /* alias name of disk */
170 char *(*devnode)(struct gendisk *gd, mode_t *mode); 166 char *(*devnode)(struct gendisk *gd, mode_t *mode);
171 167
172 unsigned int events; /* supported events */ 168 unsigned int events; /* supported events */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 19644e0016bd..d9d6c868b86b 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -110,11 +110,6 @@ static inline void copy_huge_page(struct page *dst, struct page *src)
110 110
111#define hugetlb_change_protection(vma, address, end, newprot) 111#define hugetlb_change_protection(vma, address, end, newprot)
112 112
113#ifndef HPAGE_MASK
114#define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */
115#define HPAGE_SIZE PAGE_SIZE
116#endif
117
118#endif /* !CONFIG_HUGETLB_PAGE */ 113#endif /* !CONFIG_HUGETLB_PAGE */
119 114
120#define HUGETLB_ANON_FILE "anon_hugepage" 115#define HUGETLB_ANON_FILE "anon_hugepage"
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 08a2fee40659..aad6bd4b3efd 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -118,7 +118,6 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
118static inline 118static inline
119void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) 119void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
120{ 120{
121 return 0;
122} 121}
123 122
124static inline int hwspin_lock_get_id(struct hwspinlock *hwlock) 123static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index a81bf6d23b3e..07d103a06d64 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -432,9 +432,6 @@ void i2c_unlock_adapter(struct i2c_adapter *);
432/* Internal numbers to terminate lists */ 432/* Internal numbers to terminate lists */
433#define I2C_CLIENT_END 0xfffeU 433#define I2C_CLIENT_END 0xfffeU
434 434
435/* The numbers to use to set I2C bus address */
436#define ANY_I2C_BUS 0xffff
437
438/* Construct an I2C_CLIENT_END-terminated array of i2c addresses */ 435/* Construct an I2C_CLIENT_END-terminated array of i2c addresses */
439#define I2C_ADDRS(addr, addrs...) \ 436#define I2C_ADDRS(addr, addrs...) \
440 ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END }) 437 ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END })
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 80b480c97532..abf5028db981 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -98,9 +98,10 @@ enum {
98 INET_DIAG_VEGASINFO, 98 INET_DIAG_VEGASINFO,
99 INET_DIAG_CONG, 99 INET_DIAG_CONG,
100 INET_DIAG_TOS, 100 INET_DIAG_TOS,
101 INET_DIAG_TCLASS,
101}; 102};
102 103
103#define INET_DIAG_MAX INET_DIAG_TOS 104#define INET_DIAG_MAX INET_DIAG_TCLASS
104 105
105 106
106/* INET_DIAG_MEM */ 107/* INET_DIAG_MEM */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 08ffab01e76c..32574eef9394 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -126,6 +126,8 @@ extern struct cred init_cred;
126# define INIT_PERF_EVENTS(tsk) 126# define INIT_PERF_EVENTS(tsk)
127#endif 127#endif
128 128
129#define INIT_TASK_COMM "swapper"
130
129/* 131/*
130 * INIT_TASK is used to set up the first task table, touch at 132 * INIT_TASK is used to set up the first task table, touch at
131 * your own risk!. Base=0, limit=0x1fffff (=2MB) 133 * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -162,7 +164,7 @@ extern struct cred init_cred;
162 .group_leader = &tsk, \ 164 .group_leader = &tsk, \
163 RCU_INIT_POINTER(.real_cred, &init_cred), \ 165 RCU_INIT_POINTER(.real_cred, &init_cred), \
164 RCU_INIT_POINTER(.cred, &init_cred), \ 166 RCU_INIT_POINTER(.cred, &init_cred), \
165 .comm = "swapper", \ 167 .comm = INIT_TASK_COMM, \
166 .thread = INIT_THREAD, \ 168 .thread = INIT_THREAD, \
167 .fs = &init_fs, \ 169 .fs = &init_fs, \
168 .files = &init_files, \ 170 .files = &init_files, \
@@ -184,7 +186,6 @@ extern struct cred init_cred;
184 [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ 186 [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
185 }, \ 187 }, \
186 .thread_group = LIST_HEAD_INIT(tsk.thread_group), \ 188 .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
187 .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
188 INIT_IDS \ 189 INIT_IDS \
189 INIT_PERF_EVENTS(tsk) \ 190 INIT_PERF_EVENTS(tsk) \
190 INIT_TRACE_IRQFLAGS \ 191 INIT_TRACE_IRQFLAGS \
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index f47fcd30273d..c3892fc1d538 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -555,7 +555,6 @@ struct kvm_ppc_pvinfo {
555#define KVM_CAP_PPC_SMT 64 555#define KVM_CAP_PPC_SMT 64
556#define KVM_CAP_PPC_RMA 65 556#define KVM_CAP_PPC_RMA 65
557#define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */ 557#define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */
558#define KVM_CAP_PPC_HIOR 67
559#define KVM_CAP_PPC_PAPR 68 558#define KVM_CAP_PPC_PAPR 68
560#define KVM_CAP_S390_GMAP 71 559#define KVM_CAP_S390_GMAP 71
561 560
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 25b808631cd9..fd7ff3d91e6a 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -185,7 +185,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
185#define rounddown_pow_of_two(n) \ 185#define rounddown_pow_of_two(n) \
186( \ 186( \
187 __builtin_constant_p(n) ? ( \ 187 __builtin_constant_p(n) ? ( \
188 (n == 1) ? 0 : \
189 (1UL << ilog2(n))) : \ 188 (1UL << ilog2(n))) : \
190 __rounddown_pow_of_two(n) \ 189 __rounddown_pow_of_two(n) \
191 ) 190 )
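
The (n == 1) special case removed above is what made the constant-folded rounddown_pow_of_two(1) evaluate to 0 rather than 1. A small stand-alone illustration of the intended semantics (a demonstration-only reimplementation, not the kernel macro):

#include <stdio.h>

/* Clear the lowest set bit until one bit remains, i.e. the largest
 * power of two that is <= n (demonstration only, n > 0). */
static unsigned long rounddown_pow_of_two(unsigned long n)
{
        while (n & (n - 1))
                n &= n - 1;
        return n;
}

int main(void)
{
        /* With the special case gone, 1 rounds down to 1 (not 0),
         * matching the non-constant __rounddown_pow_of_two() path. */
        printf("%lu %lu %lu\n", rounddown_pow_of_two(1),
               rounddown_pow_of_two(6), rounddown_pow_of_two(1024));
        return 0;
}

This prints "1 4 1024".
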
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 82b4c8801a4f..8bf2cb9502dd 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -243,7 +243,8 @@
243 243
244 244
245/*Registers VDD1, VDD2 voltage values definitions */ 245/*Registers VDD1, VDD2 voltage values definitions */
246#define VDD1_2_NUM_VOLTS 73 246#define VDD1_2_NUM_VOLT_FINE 73
247#define VDD1_2_NUM_VOLT_COARSE 3
247#define VDD1_2_MIN_VOLT 6000 248#define VDD1_2_MIN_VOLT 6000
248#define VDD1_2_OFFSET 125 249#define VDD1_2_OFFSET 125
249 250
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index fae295048a8b..83a9caec0e43 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -1963,6 +1963,21 @@
1963#define WM8958_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */ 1963#define WM8958_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */
1964 1964
1965/* 1965/*
1966 * R210 (0xD2) - Mic Detect 3
1967 */
1968#define WM8958_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */
1969#define WM8958_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */
1970#define WM8958_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */
1971#define WM8958_MICD_VALID 0x0002 /* MICD_VALID */
1972#define WM8958_MICD_VALID_MASK 0x0002 /* MICD_VALID */
1973#define WM8958_MICD_VALID_SHIFT 1 /* MICD_VALID */
1974#define WM8958_MICD_VALID_WIDTH 1 /* MICD_VALID */
1975#define WM8958_MICD_STS 0x0001 /* MICD_STS */
1976#define WM8958_MICD_STS_MASK 0x0001 /* MICD_STS */
1977#define WM8958_MICD_STS_SHIFT 0 /* MICD_STS */
1978#define WM8958_MICD_STS_WIDTH 1 /* MICD_STS */
1979
1980/*
1966 * R76 (0x4C) - Charge Pump (1) 1981 * R76 (0x4C) - Charge Pump (1)
1967 */ 1982 */
1968#define WM8994_CP_ENA 0x8000 /* CP_ENA */ 1983#define WM8994_CP_ENA 0x8000 /* CP_ENA */
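
The new Mic Detect 3 definitions use the driver's usual mask/shift/width convention. A hedged sketch of decoding the level field from a raw register readback (foo_micd_level() and its input value are illustrative only):

#include <linux/mfd/wm8994/registers.h>

/* Extract MICD_LVL ([10:2]) from a raw R210 (0xD2) register value. */
static unsigned int foo_micd_level(unsigned int val)
{
        return (val & WM8958_MICD_LVL_MASK) >> WM8958_MICD_LVL_SHIFT;
}
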
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3dc3a8c2c485..4baadd18f4ad 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -10,6 +10,7 @@
10#include <linux/mmzone.h> 10#include <linux/mmzone.h>
11#include <linux/rbtree.h> 11#include <linux/rbtree.h>
12#include <linux/prio_tree.h> 12#include <linux/prio_tree.h>
13#include <linux/atomic.h>
13#include <linux/debug_locks.h> 14#include <linux/debug_locks.h>
14#include <linux/mm_types.h> 15#include <linux/mm_types.h>
15#include <linux/range.h> 16#include <linux/range.h>
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 415f2db414e1..c8ef9bc54d50 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -218,6 +218,7 @@ struct mmc_card {
218#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ 218#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */
219#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ 219#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
220#define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ 220#define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */
221#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
221 /* byte mode */ 222 /* byte mode */
222 unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ 223 unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */
223#define MMC_NO_POWER_NOTIFICATION 0 224#define MMC_NO_POWER_NOTIFICATION 0
@@ -433,6 +434,11 @@ static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
433 return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512; 434 return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
434} 435}
435 436
437static inline int mmc_card_long_read_time(const struct mmc_card *c)
438{
439 return c->quirks & MMC_QUIRK_LONG_READ_TIME;
440}
441
436#define mmc_card_name(c) ((c)->cid.prod_name) 442#define mmc_card_name(c) ((c)->cid.prod_name)
437#define mmc_card_id(c) (dev_name(&(c)->dev)) 443#define mmc_card_id(c) (dev_name(&(c)->dev))
438 444
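
MMC_QUIRK_LONG_READ_TIME marks cards whose real read time exceeds what the CSD advertises, with mmc_card_long_read_time() as the accessor. A hedged sketch of how a timeout calculation might consult it; foo_read_timeout_ms() and the 2x padding are illustrative, not the core's actual policy:

#include <linux/mmc/card.h>

static unsigned int foo_read_timeout_ms(const struct mmc_card *card,
                                        unsigned int csd_timeout_ms)
{
        /* Pad the CSD-derived timeout for cards with the quirk set. */
        if (mmc_card_long_read_time(card))
                return csd_timeout_ms * 2;
        return csd_timeout_ms;
}
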
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cbeb5867cff7..a82ad4dd306a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2536,6 +2536,8 @@ extern void net_disable_timestamp(void);
2536extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); 2536extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
2537extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); 2537extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2538extern void dev_seq_stop(struct seq_file *seq, void *v); 2538extern void dev_seq_stop(struct seq_file *seq, void *v);
2539extern int dev_seq_open_ops(struct inode *inode, struct file *file,
2540 const struct seq_operations *ops);
2539#endif 2541#endif
2540 2542
2541extern int netdev_class_create_file(struct class_attribute *class_attr); 2543extern int netdev_class_create_file(struct class_attribute *class_attr);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index ab2c6343361a..92ecf5585fac 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -410,6 +410,9 @@ extern const struct inode_operations nfs_file_inode_operations;
410extern const struct inode_operations nfs3_file_inode_operations; 410extern const struct inode_operations nfs3_file_inode_operations;
411#endif /* CONFIG_NFS_V3 */ 411#endif /* CONFIG_NFS_V3 */
412extern const struct file_operations nfs_file_operations; 412extern const struct file_operations nfs_file_operations;
413#ifdef CONFIG_NFS_V4
414extern const struct file_operations nfs4_file_operations;
415#endif /* CONFIG_NFS_V4 */
413extern const struct address_space_operations nfs_file_aops; 416extern const struct address_space_operations nfs_file_aops;
414extern const struct address_space_operations nfs_dir_aops; 417extern const struct address_space_operations nfs_dir_aops;
415 418
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index c74595ba7094..2a7c533be5dd 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1192,6 +1192,7 @@ struct nfs_rpc_ops {
1192 const struct dentry_operations *dentry_ops; 1192 const struct dentry_operations *dentry_ops;
1193 const struct inode_operations *dir_inode_ops; 1193 const struct inode_operations *dir_inode_ops;
1194 const struct inode_operations *file_inode_ops; 1194 const struct inode_operations *file_inode_ops;
1195 const struct file_operations *file_ops;
1195 1196
1196 int (*getroot) (struct nfs_server *, struct nfs_fh *, 1197 int (*getroot) (struct nfs_server *, struct nfs_fh *,
1197 struct nfs_fsinfo *); 1198 struct nfs_fsinfo *);
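
The new file_ops pointer in struct nfs_rpc_ops lets each NFS version advertise its own struct file_operations (nfs4_file_operations being the v4 table declared above). A hedged, partial-initializer sketch of how the field might be wired up; the remaining nfs_rpc_ops members are elided, so this is not the real v4 ops table:

#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

static const struct nfs_rpc_ops foo_nfs_v4_clientops = {
        .file_ops       = &nfs4_file_operations,
        /* ... the remaining nfs_rpc_ops callbacks are elided ... */
};
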
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index e3d0b3890249..7ef68724f0f0 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -12,7 +12,7 @@ struct pci_ats {
12 unsigned int is_enabled:1; /* Enable bit is set */ 12 unsigned int is_enabled:1; /* Enable bit is set */
13}; 13};
14 14
15#ifdef CONFIG_PCI_IOV 15#ifdef CONFIG_PCI_ATS
16 16
17extern int pci_enable_ats(struct pci_dev *dev, int ps); 17extern int pci_enable_ats(struct pci_dev *dev, int ps);
18extern void pci_disable_ats(struct pci_dev *dev); 18extern void pci_disable_ats(struct pci_dev *dev);
@@ -29,7 +29,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
29 return dev->ats && dev->ats->is_enabled; 29 return dev->ats && dev->ats->is_enabled;
30} 30}
31 31
32#else /* CONFIG_PCI_IOV */ 32#else /* CONFIG_PCI_ATS */
33 33
34static inline int pci_enable_ats(struct pci_dev *dev, int ps) 34static inline int pci_enable_ats(struct pci_dev *dev, int ps)
35{ 35{
@@ -50,7 +50,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
50 return 0; 50 return 0;
51} 51}
52 52
53#endif /* CONFIG_PCI_IOV */ 53#endif /* CONFIG_PCI_ATS */
54 54
55#ifdef CONFIG_PCI_PRI 55#ifdef CONFIG_PCI_PRI
56 56
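
With the guards switched from CONFIG_PCI_IOV to CONFIG_PCI_ATS, callers may invoke the ATS helpers unconditionally and rely on the inline stubs when ATS support is compiled out. A hedged usage sketch (foo_setup_ats() is hypothetical; the second argument is the minimum translation unit expressed as a page shift):

#include <linux/pci.h>
#include <linux/pci-ats.h>

static void foo_setup_ats(struct pci_dev *pdev)
{
        /* Request ATS with a 4 KiB (PAGE_SHIFT) smallest translation
         * unit; the stub returns an error when ATS is compiled out. */
        if (pci_enable_ats(pdev, PAGE_SHIFT))
                dev_info(&pdev->dev, "ATS not enabled\n");
}
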
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 337df0d5d5f7..7cda65b5f798 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -338,7 +338,7 @@ struct pci_dev {
338 struct list_head msi_list; 338 struct list_head msi_list;
339#endif 339#endif
340 struct pci_vpd *vpd; 340 struct pci_vpd *vpd;
341#ifdef CONFIG_PCI_IOV 341#ifdef CONFIG_PCI_ATS
342 union { 342 union {
343 struct pci_sriov *sriov; /* SR-IOV capability related */ 343 struct pci_sriov *sriov; /* SR-IOV capability related */
344 struct pci_dev *physfn; /* the PF this VF is associated with */ 344 struct pci_dev *physfn; /* the PF this VF is associated with */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3fdf251389de..2aaee0ca9da8 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -517,8 +517,12 @@
517#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 517#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302
518#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 518#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
519#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 519#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
520#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
521#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
522#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
520#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603 523#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603
521#define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604 524#define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604
525#define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605
522#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 526#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
523#define PCI_DEVICE_ID_AMD_LANCE 0x2000 527#define PCI_DEVICE_ID_AMD_LANCE 0x2000
524#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 528#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -2405,6 +2409,8 @@
2405 2409
2406#define PCI_VENDOR_ID_AZWAVE 0x1a3b 2410#define PCI_VENDOR_ID_AZWAVE 0x1a3b
2407 2411
2412#define PCI_VENDOR_ID_ASMEDIA 0x1b21
2413
2408#define PCI_VENDOR_ID_TEKRAM 0x1de1 2414#define PCI_VENDOR_ID_TEKRAM 0x1de1
2409#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 2415#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
2410 2416
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1e9ebe5e0091..b1f89122bf6a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -822,6 +822,7 @@ struct perf_event {
822 int mmap_locked; 822 int mmap_locked;
823 struct user_struct *mmap_user; 823 struct user_struct *mmap_user;
824 struct ring_buffer *rb; 824 struct ring_buffer *rb;
825 struct list_head rb_entry;
825 826
826 /* poll related */ 827 /* poll related */
827 wait_queue_head_t waitq; 828 wait_queue_head_t waitq;
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 3605e947fa90..04c011038f32 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -121,6 +121,7 @@ extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev);
121extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev); 121extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev);
122#else 122#else
123 123
124struct pinctrl_dev;
124 125
125/* Sufficiently stupid default function when pinctrl is not in use */ 126/* Sufficiently stupid default function when pinctrl is not in use */
126static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin) 127static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin)
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index c5336705921f..7281d5acf2f9 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -30,7 +30,7 @@
30 */ 30 */
31 31
32struct tc_stats { 32struct tc_stats {
33 __u64 bytes; /* NUmber of enqueues bytes */ 33 __u64 bytes; /* Number of enqueued bytes */
34 __u32 packets; /* Number of enqueued packets */ 34 __u32 packets; /* Number of enqueued packets */
35 __u32 drops; /* Packets dropped because of lack of resources */ 35 __u32 drops; /* Packets dropped because of lack of resources */
36 __u32 overlimits; /* Number of throttle events when this 36 __u32 overlimits; /* Number of throttle events when this
@@ -297,7 +297,7 @@ struct tc_htb_glob {
297 __u32 debug; /* debug flags */ 297 __u32 debug; /* debug flags */
298 298
299 /* stats */ 299 /* stats */
300 __u32 direct_pkts; /* count of non shapped packets */ 300 __u32 direct_pkts; /* count of non shaped packets */
301}; 301};
302enum { 302enum {
303 TCA_HTB_UNSPEC, 303 TCA_HTB_UNSPEC,
@@ -503,7 +503,7 @@ enum {
503}; 503};
504#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1) 504#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
505 505
506/* State transition probablities for 4 state model */ 506/* State transition probabilities for 4 state model */
507struct tc_netem_gimodel { 507struct tc_netem_gimodel {
508 __u32 p13; 508 __u32 p13;
509 __u32 p31; 509 __u32 p31;
diff --git a/include/linux/pm.h b/include/linux/pm.h
index f15acb646813..3f3ed83a9aa5 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -54,118 +54,145 @@ typedef struct pm_message {
54/** 54/**
55 * struct dev_pm_ops - device PM callbacks 55 * struct dev_pm_ops - device PM callbacks
56 * 56 *
57 * Several driver power state transitions are externally visible, affecting 57 * Several device power state transitions are externally visible, affecting
58 * the state of pending I/O queues and (for drivers that touch hardware) 58 * the state of pending I/O queues and (for drivers that touch hardware)
59 * interrupts, wakeups, DMA, and other hardware state. There may also be 59 * interrupts, wakeups, DMA, and other hardware state. There may also be
60 * internal transitions to various low power modes, which are transparent 60 * internal transitions to various low-power modes which are transparent
61 * to the rest of the driver stack (such as a driver that's ON gating off 61 * to the rest of the driver stack (such as a driver that's ON gating off
62 * clocks which are not in active use). 62 * clocks which are not in active use).
63 * 63 *
64 * The externally visible transitions are handled with the help of the following 64 * The externally visible transitions are handled with the help of callbacks
65 * callbacks included in this structure: 65 * included in this structure in such a way that two levels of callbacks are
66 * 66 * involved. First, the PM core executes callbacks provided by PM domains,
67 * @prepare: Prepare the device for the upcoming transition, but do NOT change 67 * device types, classes and bus types. They are the subsystem-level callbacks
68 * its hardware state. Prevent new children of the device from being 68 * supposed to execute callbacks provided by device drivers, although they may
69 * registered after @prepare() returns (the driver's subsystem and 69 * choose not to do that. If the driver callbacks are executed, they have to
70 * generally the rest of the kernel is supposed to prevent new calls to the 70 * collaborate with the subsystem-level callbacks to achieve the goals
71 * probe method from being made too once @prepare() has succeeded). If 71 * appropriate for the given system transition, given transition phase and the
72 * @prepare() detects a situation it cannot handle (e.g. registration of a 72 * subsystem the device belongs to.
73 * child already in progress), it may return -EAGAIN, so that the PM core 73 *
74 * can execute it once again (e.g. after the new child has been registered) 74 * @prepare: The principal role of this callback is to prevent new children of
75 * to recover from the race condition. This method is executed for all 75 * the device from being registered after it has returned (the driver's
76 * kinds of suspend transitions and is followed by one of the suspend 76 * subsystem and generally the rest of the kernel is supposed to prevent
77 * callbacks: @suspend(), @freeze(), or @poweroff(). 77 * new calls to the probe method from being made too once @prepare() has
78 * The PM core executes @prepare() for all devices before starting to 78 * succeeded). If @prepare() detects a situation it cannot handle (e.g.
79 * execute suspend callbacks for any of them, so drivers may assume all of 79 * registration of a child already in progress), it may return -EAGAIN, so
80 * the other devices to be present and functional while @prepare() is being 80 * that the PM core can execute it once again (e.g. after a new child has
81 * executed. In particular, it is safe to make GFP_KERNEL memory 81 * been registered) to recover from the race condition.
82 * allocations from within @prepare(). However, drivers may NOT assume 82 * This method is executed for all kinds of suspend transitions and is
83 * anything about the availability of the user space at that time and it 83 * followed by one of the suspend callbacks: @suspend(), @freeze(), or
84 * is not correct to request firmware from within @prepare() (it's too 84 * @poweroff(). The PM core executes subsystem-level @prepare() for all
85 * late to do that). [To work around this limitation, drivers may 85 * devices before starting to invoke suspend callbacks for any of them, so
86 * register suspend and hibernation notifiers that are executed before the 86 * generally devices may be assumed to be functional or to respond to
87 * freezing of tasks.] 87 * runtime resume requests while @prepare() is being executed. However,
88 * device drivers may NOT assume anything about the availability of user
89 * space at that time and it is NOT valid to request firmware from within
90 * @prepare() (it's too late to do that). It also is NOT valid to allocate
91 * substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
92 * [To work around these limitations, drivers may register suspend and
93 * hibernation notifiers to be executed before the freezing of tasks.]
88 * 94 *
89 * @complete: Undo the changes made by @prepare(). This method is executed for 95 * @complete: Undo the changes made by @prepare(). This method is executed for
90 * all kinds of resume transitions, following one of the resume callbacks: 96 * all kinds of resume transitions, following one of the resume callbacks:
91 * @resume(), @thaw(), @restore(). Also called if the state transition 97 * @resume(), @thaw(), @restore(). Also called if the state transition
92 * fails before the driver's suspend callback (@suspend(), @freeze(), 98 * fails before the driver's suspend callback: @suspend(), @freeze() or
93 * @poweroff()) can be executed (e.g. if the suspend callback fails for one 99 * @poweroff(), can be executed (e.g. if the suspend callback fails for one
94 * of the other devices that the PM core has unsuccessfully attempted to 100 * of the other devices that the PM core has unsuccessfully attempted to
95 * suspend earlier). 101 * suspend earlier).
96 * The PM core executes @complete() after it has executed the appropriate 102 * The PM core executes subsystem-level @complete() after it has executed
97 * resume callback for all devices. 103 * the appropriate resume callbacks for all devices.
98 * 104 *
99 * @suspend: Executed before putting the system into a sleep state in which the 105 * @suspend: Executed before putting the system into a sleep state in which the
100 * contents of main memory are preserved. Quiesce the device, put it into 106 * contents of main memory are preserved. The exact action to perform
101 * a low power state appropriate for the upcoming system state (such as 107 * depends on the device's subsystem (PM domain, device type, class or bus
102 * PCI_D3hot), and enable wakeup events as appropriate. 108 * type), but generally the device must be quiescent after subsystem-level
109 * @suspend() has returned, so that it doesn't do any I/O or DMA.
110 * Subsystem-level @suspend() is executed for all devices after invoking
111 * subsystem-level @prepare() for all of them.
103 * 112 *
104 * @resume: Executed after waking the system up from a sleep state in which the 113 * @resume: Executed after waking the system up from a sleep state in which the
105 * contents of main memory were preserved. Put the device into the 114 * contents of main memory were preserved. The exact action to perform
106 * appropriate state, according to the information saved in memory by the 115 * depends on the device's subsystem, but generally the driver is expected
107 * preceding @suspend(). The driver starts working again, responding to 116 * to start working again, responding to hardware events and software
108 * hardware events and software requests. The hardware may have gone 117 * requests (the device itself may be left in a low-power state, waiting
109 * through a power-off reset, or it may have maintained state from the 118 * for a runtime resume to occur). The state of the device at the time its
110 * previous suspend() which the driver may rely on while resuming. On most 119 * driver's @resume() callback is run depends on the platform and subsystem
111 * platforms, there are no restrictions on availability of resources like 120 * the device belongs to. On most platforms, there are no restrictions on
112 * clocks during @resume(). 121 * availability of resources like clocks during @resume().
122 * Subsystem-level @resume() is executed for all devices after invoking
123 * subsystem-level @resume_noirq() for all of them.
113 * 124 *
114 * @freeze: Hibernation-specific, executed before creating a hibernation image. 125 * @freeze: Hibernation-specific, executed before creating a hibernation image.
115 * Quiesce operations so that a consistent image can be created, but do NOT 126 * Analogous to @suspend(), but it should not enable the device to signal
116 * otherwise put the device into a low power device state and do NOT emit 127 * wakeup events or change its power state. The majority of subsystems
117 * system wakeup events. Save in main memory the device settings to be 128 * (with the notable exception of the PCI bus type) expect the driver-level
118 * used by @restore() during the subsequent resume from hibernation or by 129 * @freeze() to save the device settings in memory to be used by @restore()
119 * the subsequent @thaw(), if the creation of the image or the restoration 130 * during the subsequent resume from hibernation.
120 * of main memory contents from it fails. 131 * Subsystem-level @freeze() is executed for all devices after invoking
132 * subsystem-level @prepare() for all of them.
121 * 133 *
122 * @thaw: Hibernation-specific, executed after creating a hibernation image OR 134 * @thaw: Hibernation-specific, executed after creating a hibernation image OR
123 * if the creation of the image fails. Also executed after a failing 135 * if the creation of an image has failed. Also executed after a failing
124 * attempt to restore the contents of main memory from such an image. 136 * attempt to restore the contents of main memory from such an image.
125 * Undo the changes made by the preceding @freeze(), so the device can be 137 * Undo the changes made by the preceding @freeze(), so the device can be
126 * operated in the same way as immediately before the call to @freeze(). 138 * operated in the same way as immediately before the call to @freeze().
139 * Subsystem-level @thaw() is executed for all devices after invoking
140 * subsystem-level @thaw_noirq() for all of them. It also may be executed
141 * directly after @freeze() in case of a transition error.
127 * 142 *
128 * @poweroff: Hibernation-specific, executed after saving a hibernation image. 143 * @poweroff: Hibernation-specific, executed after saving a hibernation image.
129 * Quiesce the device, put it into a low power state appropriate for the 144 * Analogous to @suspend(), but it need not save the device's settings in
130 * upcoming system state (such as PCI_D3hot), and enable wakeup events as 145 * memory.
131 * appropriate. 146 * Subsystem-level @poweroff() is executed for all devices after invoking
147 * subsystem-level @prepare() for all of them.
132 * 148 *
133 * @restore: Hibernation-specific, executed after restoring the contents of main 149 * @restore: Hibernation-specific, executed after restoring the contents of main
134 * memory from a hibernation image. Driver starts working again, 150 * memory from a hibernation image, analogous to @resume().
135 * responding to hardware events and software requests. Drivers may NOT 151 *
136 * make ANY assumptions about the hardware state right prior to @restore(). 152 * @suspend_noirq: Complete the actions started by @suspend(). Carry out any
137 * On most platforms, there are no restrictions on availability of 153 * additional operations required for suspending the device that might be
138 * resources like clocks during @restore(). 154 * racing with its driver's interrupt handler, which is guaranteed not to
139 * 155 * run while @suspend_noirq() is being executed.
140 * @suspend_noirq: Complete the operations of ->suspend() by carrying out any 156 * It generally is expected that the device will be in a low-power state
141 * actions required for suspending the device that need interrupts to be 157 * (appropriate for the target system sleep state) after subsystem-level
142 * disabled 158 * @suspend_noirq() has returned successfully. If the device can generate
143 * 159 * system wakeup signals and is enabled to wake up the system, it should be
144 * @resume_noirq: Prepare for the execution of ->resume() by carrying out any 160 * configured to do so at that time. However, depending on the platform
145 * actions required for resuming the device that need interrupts to be 161 * and device's subsystem, @suspend() may be allowed to put the device into
146 * disabled 162 * the low-power state and configure it to generate wakeup signals, in
147 * 163 * which case it generally is not necessary to define @suspend_noirq().
148 * @freeze_noirq: Complete the operations of ->freeze() by carrying out any 164 *
149 * actions required for freezing the device that need interrupts to be 165 * @resume_noirq: Prepare for the execution of @resume() by carrying out any
150 * disabled 166 * operations required for resuming the device that might be racing with
151 * 167 * its driver's interrupt handler, which is guaranteed not to run while
152 * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any 168 * @resume_noirq() is being executed.
153 * actions required for thawing the device that need interrupts to be 169 *
154 * disabled 170 * @freeze_noirq: Complete the actions started by @freeze(). Carry out any
155 * 171 * additional operations required for freezing the device that might be
156 * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any 172 * racing with its driver's interrupt handler, which is guaranteed not to
157 * actions required for handling the device that need interrupts to be 173 * run while @freeze_noirq() is being executed.
158 * disabled 174 * The power state of the device should not be changed by either @freeze()
159 * 175 * or @freeze_noirq() and it should not be configured to signal system
160 * @restore_noirq: Prepare for the execution of ->restore() by carrying out any 176 * wakeup by any of these callbacks.
161 * actions required for restoring the operations of the device that need 177 *
162 * interrupts to be disabled 178 * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
179 * operations required for thawing the device that might be racing with its
180 * driver's interrupt handler, which is guaranteed not to run while
181 * @thaw_noirq() is being executed.
182 *
183 * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to
184 * @suspend_noirq(), but it need not save the device's settings in memory.
185 *
186 * @restore_noirq: Prepare for the execution of @restore() by carrying out any
187 * operations required for thawing the device that might be racing with its
188 * driver's interrupt handler, which is guaranteed not to run while
189 * @restore_noirq() is being executed. Analogous to @resume_noirq().
163 * 190 *
164 * All of the above callbacks, except for @complete(), return error codes. 191 * All of the above callbacks, except for @complete(), return error codes.
165 * However, the error codes returned by the resume operations, @resume(), 192 * However, the error codes returned by the resume operations, @resume(),
166 * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do 193 * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
167 * not cause the PM core to abort the resume transition during which they are 194 * not cause the PM core to abort the resume transition during which they are
168 * returned. The error codes returned in that cases are only printed by the PM 195 * returned. The error codes returned in those cases are only printed by the PM
169 * core to the system logs for debugging purposes. Still, it is recommended 196 * core to the system logs for debugging purposes. Still, it is recommended
170 * that drivers only return error codes from their resume methods in case of an 197 * that drivers only return error codes from their resume methods in case of an
171 * unrecoverable failure (i.e. when the device being handled refuses to resume 198 * unrecoverable failure (i.e. when the device being handled refuses to resume
@@ -174,31 +201,43 @@ typedef struct pm_message {
174 * their children. 201 * their children.
175 * 202 *
176 * It is allowed to unregister devices while the above callbacks are being 203 * It is allowed to unregister devices while the above callbacks are being
177 * executed. However, it is not allowed to unregister a device from within any 204 * executed. However, a callback routine must NOT try to unregister the device
178 * of its own callbacks. 205 * it was called for, although it may unregister children of that device (for
206 * example, if it detects that a child was unplugged while the system was
207 * asleep).
208 *
209 * Refer to Documentation/power/devices.txt for more information about the role
210 * of the above callbacks in the system suspend process.
179 * 211 *
180 * There also are the following callbacks related to run-time power management 212 * There also are callbacks related to runtime power management of devices.
181 * of devices: 213 * Again, these callbacks are executed by the PM core only for subsystems
214 * (PM domains, device types, classes and bus types) and the subsystem-level
215 * callbacks are supposed to invoke the driver callbacks. Moreover, the exact
216 * actions to be performed by a device driver's callbacks generally depend on
217 * the platform and subsystem the device belongs to.
182 * 218 *
183 * @runtime_suspend: Prepare the device for a condition in which it won't be 219 * @runtime_suspend: Prepare the device for a condition in which it won't be
184 * able to communicate with the CPU(s) and RAM due to power management. 220 * able to communicate with the CPU(s) and RAM due to power management.
185 * This need not mean that the device should be put into a low power state. 221 * This need not mean that the device should be put into a low-power state.
186 * For example, if the device is behind a link which is about to be turned 222 * For example, if the device is behind a link which is about to be turned
187 * off, the device may remain at full power. If the device does go to low 223 * off, the device may remain at full power. If the device does go to low
188 * power and is capable of generating run-time wake-up events, remote 224 * power and is capable of generating runtime wakeup events, remote wakeup
189 * wake-up (i.e., a hardware mechanism allowing the device to request a 225 * (i.e., a hardware mechanism allowing the device to request a change of
190 * change of its power state via a wake-up event, such as PCI PME) should 226 * its power state via an interrupt) should be enabled for it.
191 * be enabled for it.
192 * 227 *
193 * @runtime_resume: Put the device into the fully active state in response to a 228 * @runtime_resume: Put the device into the fully active state in response to a
194 * wake-up event generated by hardware or at the request of software. If 229 * wakeup event generated by hardware or at the request of software. If
195 * necessary, put the device into the full power state and restore its 230 * necessary, put the device into the full-power state and restore its
196 * registers, so that it is fully operational. 231 * registers, so that it is fully operational.
197 * 232 *
198 * @runtime_idle: Device appears to be inactive and it might be put into a low 233 * @runtime_idle: Device appears to be inactive and it might be put into a
199 * power state if all of the necessary conditions are satisfied. Check 234 * low-power state if all of the necessary conditions are satisfied. Check
200 * these conditions and handle the device as appropriate, possibly queueing 235 * these conditions and handle the device as appropriate, possibly queueing
201 * a suspend request for it. The return value is ignored by the PM core. 236 * a suspend request for it. The return value is ignored by the PM core.
237 *
238 * Refer to Documentation/power/runtime_pm.txt for more information about the
239 * role of the above callbacks in device runtime power management.
240 *
202 */ 241 */
203 242
204struct dev_pm_ops { 243struct dev_pm_ops {
@@ -447,6 +486,7 @@ struct dev_pm_info {
447 unsigned int async_suspend:1; 486 unsigned int async_suspend:1;
448 bool is_prepared:1; /* Owned by the PM core */ 487 bool is_prepared:1; /* Owned by the PM core */
449 bool is_suspended:1; /* Ditto */ 488 bool is_suspended:1; /* Ditto */
489 bool ignore_children:1;
450 spinlock_t lock; 490 spinlock_t lock;
451#ifdef CONFIG_PM_SLEEP 491#ifdef CONFIG_PM_SLEEP
452 struct list_head entry; 492 struct list_head entry;
@@ -464,7 +504,6 @@ struct dev_pm_info {
464 atomic_t usage_count; 504 atomic_t usage_count;
465 atomic_t child_count; 505 atomic_t child_count;
466 unsigned int disable_depth:3; 506 unsigned int disable_depth:3;
467 unsigned int ignore_children:1;
468 unsigned int idle_notification:1; 507 unsigned int idle_notification:1;
469 unsigned int request_pending:1; 508 unsigned int request_pending:1;
470 unsigned int deferred_resume:1; 509 unsigned int deferred_resume:1;
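
The reworked kernel-doc above emphasizes that these are subsystem-level callbacks which, in turn, are expected to run the driver's own dev_pm_ops. A minimal hedged sketch of a driver providing system-sleep callbacks (the foo_* names are hypothetical; SET_SYSTEM_SLEEP_PM_OPS wires suspend/resume into the hibernation slots as well):

#include <linux/pm.h>
#include <linux/platform_device.h>

static int foo_suspend(struct device *dev)
{
        /* Quiesce the device; the subsystem-level callback decides when
         * (and whether) this driver callback actually runs. */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* Undo foo_suspend() and make the device operational again. */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

static struct platform_driver foo_driver = {
        .driver = {
                .name   = "foo",
                .pm     = &foo_pm_ops,
        },
};
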
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index d8d903619642..d3085e72a0ee 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -52,11 +52,6 @@ static inline bool pm_children_suspended(struct device *dev)
52 || !atomic_read(&dev->power.child_count); 52 || !atomic_read(&dev->power.child_count);
53} 53}
54 54
55static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
56{
57 dev->power.ignore_children = enable;
58}
59
60static inline void pm_runtime_get_noresume(struct device *dev) 55static inline void pm_runtime_get_noresume(struct device *dev)
61{ 56{
62 atomic_inc(&dev->power.usage_count); 57 atomic_inc(&dev->power.usage_count);
@@ -130,7 +125,6 @@ static inline void pm_runtime_allow(struct device *dev) {}
130static inline void pm_runtime_forbid(struct device *dev) {} 125static inline void pm_runtime_forbid(struct device *dev) {}
131 126
132static inline bool pm_children_suspended(struct device *dev) { return false; } 127static inline bool pm_children_suspended(struct device *dev) { return false; }
133static inline void pm_suspend_ignore_children(struct device *dev, bool en) {}
134static inline void pm_runtime_get_noresume(struct device *dev) {} 128static inline void pm_runtime_get_noresume(struct device *dev) {}
135static inline void pm_runtime_put_noidle(struct device *dev) {} 129static inline void pm_runtime_put_noidle(struct device *dev) {}
136static inline bool device_run_wake(struct device *dev) { return false; } 130static inline bool device_run_wake(struct device *dev) { return false; }
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index ea567321ae3c..2ca8cde5459d 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -35,10 +35,12 @@ struct pstore_info {
35 spinlock_t buf_lock; /* serialize access to 'buf' */ 35 spinlock_t buf_lock; /* serialize access to 'buf' */
36 char *buf; 36 char *buf;
37 size_t bufsize; 37 size_t bufsize;
38 struct mutex read_mutex; /* serialize open/read/close */
38 int (*open)(struct pstore_info *psi); 39 int (*open)(struct pstore_info *psi);
39 int (*close)(struct pstore_info *psi); 40 int (*close)(struct pstore_info *psi);
40 ssize_t (*read)(u64 *id, enum pstore_type_id *type, 41 ssize_t (*read)(u64 *id, enum pstore_type_id *type,
41 struct timespec *time, struct pstore_info *psi); 42 struct timespec *time, char **buf,
43 struct pstore_info *psi);
42 int (*write)(enum pstore_type_id type, u64 *id, 44 int (*write)(enum pstore_type_id type, u64 *id,
43 unsigned int part, size_t size, struct pstore_info *psi); 45 unsigned int part, size_t size, struct pstore_info *psi);
44 int (*erase)(enum pstore_type_id type, u64 id, 46 int (*erase)(enum pstore_type_id type, u64 id,
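
The ->read() hook now returns the record through the new char **buf argument, with read_mutex serializing open/read/close against concurrent access. A hedged sketch of a backend implementing the updated signature; foo_fetch_record() and its record layout are hypothetical:

#include <linux/pstore.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical backend helper returning the next stored record. */
extern char *foo_fetch_record(struct pstore_info *psi, u64 *id,
                              enum pstore_type_id *type,
                              struct timespec *time, size_t *size);

static ssize_t foo_pstore_read(u64 *id, enum pstore_type_id *type,
                               struct timespec *time, char **buf,
                               struct pstore_info *psi)
{
        size_t size;
        char *rec = foo_fetch_record(psi, id, type, time, &size);

        if (!rec)
                return 0;       /* no more records */

        *buf = kmemdup(rec, size, GFP_KERNEL);
        if (!*buf)
                return -ENOMEM;
        return size;
}
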
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 68daf4f27e2c..1c4f3e9b9bc5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1521,7 +1521,6 @@ struct task_struct {
1521#ifdef CONFIG_FAULT_INJECTION 1521#ifdef CONFIG_FAULT_INJECTION
1522 int make_it_fail; 1522 int make_it_fail;
1523#endif 1523#endif
1524 struct prop_local_single dirties;
1525 /* 1524 /*
1526 * when (nr_dirtied >= nr_dirtied_pause), it's time to call 1525 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1527 * balance_dirty_pages() for some dirty throttling pause 1526 * balance_dirty_pages() for some dirty throttling pause
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 97ff8e27a6cc..3d86517fe7d5 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -207,13 +207,15 @@ struct serial_icounter_struct {
207 207
208struct serial_rs485 { 208struct serial_rs485 {
209 __u32 flags; /* RS485 feature flags */ 209 __u32 flags; /* RS485 feature flags */
210#define SER_RS485_ENABLED (1 << 0) 210#define SER_RS485_ENABLED (1 << 0) /* If enabled */
211#define SER_RS485_RTS_ON_SEND (1 << 1) 211#define SER_RS485_RTS_ON_SEND (1 << 1) /* Logical level for
212#define SER_RS485_RTS_AFTER_SEND (1 << 2) 212 RTS pin when
213#define SER_RS485_RTS_BEFORE_SEND (1 << 3) 213 sending */
214#define SER_RS485_RTS_AFTER_SEND (1 << 2) /* Logical level for
215 RTS pin after sent*/
214#define SER_RS485_RX_DURING_TX (1 << 4) 216#define SER_RS485_RX_DURING_TX (1 << 4)
215 __u32 delay_rts_before_send; /* Milliseconds */ 217 __u32 delay_rts_before_send; /* Delay before send (milliseconds) */
216 __u32 delay_rts_after_send; /* Milliseconds */ 218 __u32 delay_rts_after_send; /* Delay after send (milliseconds) */
217 __u32 padding[5]; /* Memory is cheap, new structs 219 __u32 padding[5]; /* Memory is cheap, new structs
218 are a royal PITA .. */ 220 are a royal PITA .. */
219}; 221};
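
The newly commented SER_RS485_* flags are what user space hands to the RS485 ioctls. A hedged userspace example enabling RS485 with RTS asserted while sending; the device node and flag choice are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
        struct serial_rs485 rs485 = {
                .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
                .delay_rts_before_send = 0,     /* milliseconds */
                .delay_rts_after_send  = 0,     /* milliseconds */
        };
        int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, TIOCSRS485, &rs485) < 0)
                perror("TIOCSRS485");
        close(fd);
        return 0;
}
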
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 0efa1f10bc2b..369273a52679 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -67,6 +67,7 @@ enum {
67 SCIx_IRDA_REGTYPE, 67 SCIx_IRDA_REGTYPE,
68 SCIx_SCIFA_REGTYPE, 68 SCIx_SCIFA_REGTYPE,
69 SCIx_SCIFB_REGTYPE, 69 SCIx_SCIFB_REGTYPE,
70 SCIx_SH2_SCIF_FIFODATA_REGTYPE,
70 SCIx_SH3_SCIF_REGTYPE, 71 SCIx_SH3_SCIF_REGTYPE,
71 SCIx_SH4_SCIF_REGTYPE, 72 SCIx_SH4_SCIF_REGTYPE,
72 SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, 73 SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h
index 3ccf18648d0a..a20831cf336a 100644
--- a/include/linux/sh_clk.h
+++ b/include/linux/sh_clk.h
@@ -52,7 +52,6 @@ struct clk {
52 52
53 unsigned long arch_flags; 53 unsigned long arch_flags;
54 void *priv; 54 void *priv;
55 struct dentry *dentry;
56 struct clk_mapping *mapping; 55 struct clk_mapping *mapping;
57 struct cpufreq_frequency_table *freq_table; 56 struct cpufreq_frequency_table *freq_table;
58 unsigned int nr_freqs; 57 unsigned int nr_freqs;
@@ -94,6 +93,9 @@ int clk_rate_table_find(struct clk *clk,
94long clk_rate_div_range_round(struct clk *clk, unsigned int div_min, 93long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
95 unsigned int div_max, unsigned long rate); 94 unsigned int div_max, unsigned long rate);
96 95
96long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
97 unsigned int mult_max, unsigned long rate);
98
97long clk_round_parent(struct clk *clk, unsigned long target, 99long clk_round_parent(struct clk *clk, unsigned long target,
98 unsigned long *best_freq, unsigned long *parent_freq, 100 unsigned long *best_freq, unsigned long *parent_freq,
99 unsigned int div_min, unsigned int div_max); 101 unsigned int div_min, unsigned int div_max);
diff --git a/include/linux/sh_pfc.h b/include/linux/sh_pfc.h
index bc8c9208f7e2..8446789216e5 100644
--- a/include/linux/sh_pfc.h
+++ b/include/linux/sh_pfc.h
@@ -104,4 +104,80 @@ struct pinmux_info {
104int register_pinmux(struct pinmux_info *pip); 104int register_pinmux(struct pinmux_info *pip);
105int unregister_pinmux(struct pinmux_info *pip); 105int unregister_pinmux(struct pinmux_info *pip);
106 106
107/* helper macro for port */
108#define PORT_1(fn, pfx, sfx) fn(pfx, sfx)
109
110#define PORT_10(fn, pfx, sfx) \
111 PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx), \
112 PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx), \
113 PORT_1(fn, pfx##4, sfx), PORT_1(fn, pfx##5, sfx), \
114 PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx), \
115 PORT_1(fn, pfx##8, sfx), PORT_1(fn, pfx##9, sfx)
116
117#define PORT_90(fn, pfx, sfx) \
118 PORT_10(fn, pfx##1, sfx), PORT_10(fn, pfx##2, sfx), \
119 PORT_10(fn, pfx##3, sfx), PORT_10(fn, pfx##4, sfx), \
120 PORT_10(fn, pfx##5, sfx), PORT_10(fn, pfx##6, sfx), \
121 PORT_10(fn, pfx##7, sfx), PORT_10(fn, pfx##8, sfx), \
122 PORT_10(fn, pfx##9, sfx)
123
124#define _PORT_ALL(pfx, sfx) pfx##_##sfx
125#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
126#define PORT_ALL(str) CPU_ALL_PORT(_PORT_ALL, PORT, str)
127#define GPIO_PORT_ALL() CPU_ALL_PORT(_GPIO_PORT, , unused)
128#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
129
130/* helper macro for pinmux_enum_t */
131#define PORT_DATA_I(nr) \
132 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN)
133
134#define PORT_DATA_I_PD(nr) \
135 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
136 PORT##nr##_IN, PORT##nr##_IN_PD)
137
138#define PORT_DATA_I_PU(nr) \
139 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
140 PORT##nr##_IN, PORT##nr##_IN_PU)
141
142#define PORT_DATA_I_PU_PD(nr) \
143 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
144 PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
145
146#define PORT_DATA_O(nr) \
147 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT)
148
149#define PORT_DATA_IO(nr) \
150 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
151 PORT##nr##_IN)
152
153#define PORT_DATA_IO_PD(nr) \
154 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
155 PORT##nr##_IN, PORT##nr##_IN_PD)
156
157#define PORT_DATA_IO_PU(nr) \
158 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
159 PORT##nr##_IN, PORT##nr##_IN_PU)
160
161#define PORT_DATA_IO_PU_PD(nr) \
162 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
163 PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
164
165/* helper macro for top 4 bits in PORTnCR */
166#define _PCRH(in, in_pd, in_pu, out) \
167 0, (out), (in), 0, \
168 0, 0, 0, 0, \
169 0, 0, (in_pd), 0, \
170 0, 0, (in_pu), 0
171
172#define PORTCR(nr, reg) \
173 { \
174 PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
175 _PCRH(PORT##nr##_IN, PORT##nr##_IN_PD, \
176 PORT##nr##_IN_PU, PORT##nr##_OUT), \
177 PORT##nr##_FN0, PORT##nr##_FN1, \
178 PORT##nr##_FN2, PORT##nr##_FN3, \
179 PORT##nr##_FN4, PORT##nr##_FN5, \
180 PORT##nr##_FN6, PORT##nr##_FN7 } \
181 }
182
107#endif /* __SH_PFC_H */ 183#endif /* __SH_PFC_H */
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index a83833a1f7a2..07ceb97d53fa 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -35,7 +35,7 @@ struct shrinker {
35 35
36 /* These are for internal use */ 36 /* These are for internal use */
37 struct list_head list; 37 struct list_head list;
38 long nr; /* objs pending delete */ 38 atomic_long_t nr_in_batch; /* objs pending delete */
39}; 39};
40#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ 40#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
41extern void register_shrinker(struct shrinker *); 41extern void register_shrinker(struct shrinker *);
diff --git a/include/linux/sigma.h b/include/linux/sigma.h
index e2accb3164d8..d0de882c0d96 100644
--- a/include/linux/sigma.h
+++ b/include/linux/sigma.h
@@ -24,7 +24,7 @@ struct sigma_firmware {
24struct sigma_firmware_header { 24struct sigma_firmware_header {
25 unsigned char magic[7]; 25 unsigned char magic[7];
26 u8 version; 26 u8 version;
27 u32 crc; 27 __le32 crc;
28}; 28};
29 29
30enum { 30enum {
@@ -40,19 +40,14 @@ enum {
40struct sigma_action { 40struct sigma_action {
41 u8 instr; 41 u8 instr;
42 u8 len_hi; 42 u8 len_hi;
43 u16 len; 43 __le16 len;
44 u16 addr; 44 __be16 addr;
45 unsigned char payload[]; 45 unsigned char payload[];
46}; 46};
47 47
48static inline u32 sigma_action_len(struct sigma_action *sa) 48static inline u32 sigma_action_len(struct sigma_action *sa)
49{ 49{
50 return (sa->len_hi << 16) | sa->len; 50 return (sa->len_hi << 16) | le16_to_cpu(sa->len);
51}
52
53static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len)
54{
55 return sizeof(*sa) + payload_len + (payload_len % 2);
56} 51}
57 52
58extern int process_sigma_firmware(struct i2c_client *client, const char *name); 53extern int process_sigma_firmware(struct i2c_client *client, const char *name);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index add4790b21fe..e9e72bda1b72 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -85,6 +85,8 @@
85 * @reset: reset the device 85 * @reset: reset the device
86 * vdev: the virtio device 86 * vdev: the virtio device
87 * After this, status and feature negotiation must be done again 87 * After this, status and feature negotiation must be done again
88 * Device must not be reset from its vq/config callbacks, or in
89 * parallel with being added/removed.
88 * @find_vqs: find virtqueues and instantiate them. 90 * @find_vqs: find virtqueues and instantiate them.
89 * vdev: the virtio_device 91 * vdev: the virtio_device
90 * nvqs: the number of virtqueues to find 92 * nvqs: the number of virtqueues to find
diff --git a/include/linux/virtio_mmio.h b/include/linux/virtio_mmio.h
index 27c7edefbc86..5c7b6f0daef8 100644
--- a/include/linux/virtio_mmio.h
+++ b/include/linux/virtio_mmio.h
@@ -63,7 +63,7 @@
63#define VIRTIO_MMIO_GUEST_FEATURES 0x020 63#define VIRTIO_MMIO_GUEST_FEATURES 0x020
64 64
65/* Activated features set selector - Write Only */ 65/* Activated features set selector - Write Only */
66#define VIRTIO_MMIO_GUEST_FEATURES_SET 0x024 66#define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024
67 67
68/* Guest's memory page size in bytes - Write Only */ 68/* Guest's memory page size in bytes - Write Only */
69#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028 69#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 687fb11e2010..4bde182fcf93 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -119,7 +119,7 @@ unmap_kernel_range(unsigned long addr, unsigned long size)
119#endif 119#endif
120 120
121/* Allocate/destroy a 'vmalloc' VM area. */ 121/* Allocate/destroy a 'vmalloc' VM area. */
122extern struct vm_struct *alloc_vm_area(size_t size); 122extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
123extern void free_vm_area(struct vm_struct *area); 123extern void free_vm_area(struct vm_struct *area);
124 124
125/* for /dev/kmem */ 125/* for /dev/kmem */
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index ab90ae0970a6..6cc18f371675 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -39,8 +39,11 @@
39#define L2CAP_DEFAULT_ACK_TO 200 39#define L2CAP_DEFAULT_ACK_TO 200
40#define L2CAP_LE_DEFAULT_MTU 23 40#define L2CAP_LE_DEFAULT_MTU 23
41 41
42#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */ 42#define L2CAP_DISC_TIMEOUT (100)
43#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */ 43#define L2CAP_DISC_REJ_TIMEOUT (5000) /* 5 seconds */
44#define L2CAP_ENC_TIMEOUT (5000) /* 5 seconds */
45#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */
46#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */
44 47
45/* L2CAP socket address */ 48/* L2CAP socket address */
46struct sockaddr_l2 { 49struct sockaddr_l2 {
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 92cf1c2c30c9..95852e36713b 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -456,6 +456,9 @@ enum station_parameters_apply_mask {
456 * as the AC bitmap in the QoS info field 456 * as the AC bitmap in the QoS info field
457 * @max_sp: max Service Period. same format as the MAX_SP in the 457 * @max_sp: max Service Period. same format as the MAX_SP in the
458 * QoS info field (but already shifted down) 458 * QoS info field (but already shifted down)
459 * @sta_modify_mask: bitmap indicating which parameters changed
460 * (for those that don't have a natural "no change" value),
461 * see &enum station_parameters_apply_mask
459 */ 462 */
460struct station_parameters { 463struct station_parameters {
461 u8 *supported_rates; 464 u8 *supported_rates;
@@ -615,6 +618,7 @@ struct sta_bss_parameters {
615 * user space MLME/SME implementation. The information is provided for 618 * user space MLME/SME implementation. The information is provided for
616 * the cfg80211_new_sta() calls to notify user space of the IEs. 619 * the cfg80211_new_sta() calls to notify user space of the IEs.
617 * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets. 620 * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
621 * @sta_flags: station flags mask & values
618 */ 622 */
619struct station_info { 623struct station_info {
620 u32 filled; 624 u32 filled;
diff --git a/include/net/dst.h b/include/net/dst.h
index 4fb6c4381791..6faec1a60216 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -205,12 +205,7 @@ dst_feature(const struct dst_entry *dst, u32 feature)
205 205
206static inline u32 dst_mtu(const struct dst_entry *dst) 206static inline u32 dst_mtu(const struct dst_entry *dst)
207{ 207{
208 u32 mtu = dst_metric_raw(dst, RTAX_MTU); 208 return dst->ops->mtu(dst);
209
210 if (!mtu)
211 mtu = dst->ops->default_mtu(dst);
212
213 return mtu;
214} 209}
215 210
216/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */ 211/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 9adb99845a56..e1c2ee0eef47 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -17,7 +17,7 @@ struct dst_ops {
17 int (*gc)(struct dst_ops *ops); 17 int (*gc)(struct dst_ops *ops);
18 struct dst_entry * (*check)(struct dst_entry *, __u32 cookie); 18 struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
19 unsigned int (*default_advmss)(const struct dst_entry *); 19 unsigned int (*default_advmss)(const struct dst_entry *);
20 unsigned int (*default_mtu)(const struct dst_entry *); 20 unsigned int (*mtu)(const struct dst_entry *);
21 u32 * (*cow_metrics)(struct dst_entry *, unsigned long); 21 u32 * (*cow_metrics)(struct dst_entry *, unsigned long);
22 void (*destroy)(struct dst_entry *); 22 void (*destroy)(struct dst_entry *);
23 void (*ifdown)(struct dst_entry *, 23 void (*ifdown)(struct dst_entry *,
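
dst_mtu() now delegates directly to the new ->mtu() operation instead of combining the raw metric with default_mtu(). A hedged sketch of what a minimal ->mtu() implementation could look like; foo_mtu() is hypothetical and real protocol implementations apply more policy:

#include <net/dst.h>
#include <net/dst_ops.h>

static unsigned int foo_mtu(const struct dst_entry *dst)
{
        unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

        /* Fall back to the device MTU when no metric is recorded. */
        return mtu ? mtu : dst->dev->mtu;
}

static struct dst_ops foo_dst_ops = {
        .mtu = foo_mtu,
        /* other dst_ops members elided for brevity */
};
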
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index b897d6e6d0a5..f941964a9931 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -31,6 +31,7 @@
31/** struct ip_options - IP Options 31/** struct ip_options - IP Options
32 * 32 *
33 * @faddr - Saved first hop address 33 * @faddr - Saved first hop address
34 * @nexthop - Saved nexthop address in LSRR and SSRR
34 * @is_data - Options in __data, rather than skb 35 * @is_data - Options in __data, rather than skb
35 * @is_strictroute - Strict source route 36 * @is_strictroute - Strict source route
36 * @srr_is_hit - Packet destination addr was our one 37 * @srr_is_hit - Packet destination addr was our one
@@ -41,6 +42,7 @@
41 */ 42 */
42struct ip_options { 43struct ip_options {
43 __be32 faddr; 44 __be32 faddr;
45 __be32 nexthop;
44 unsigned char optlen; 46 unsigned char optlen;
45 unsigned char srr; 47 unsigned char srr;
46 unsigned char rr; 48 unsigned char rr;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 78c83e62218f..e9ff3fc5e688 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -35,6 +35,7 @@ struct inet_peer {
35 35
36 u32 metrics[RTAX_MAX]; 36 u32 metrics[RTAX_MAX];
37 u32 rate_tokens; /* rate limiting for ICMP */ 37 u32 rate_tokens; /* rate limiting for ICMP */
38 int redirect_genid;
38 unsigned long rate_last; 39 unsigned long rate_last;
39 unsigned long pmtu_expires; 40 unsigned long pmtu_expires;
40 u32 pmtu_orig; 41 u32 pmtu_orig;
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 4283508b3e18..a88fb6939387 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -67,18 +67,18 @@ struct nf_ct_event_notifier {
67 int (*fcn)(unsigned int events, struct nf_ct_event *item); 67 int (*fcn)(unsigned int events, struct nf_ct_event *item);
68}; 68};
69 69
70extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; 70extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
71extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb); 71extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
72extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
73 72
74extern void nf_ct_deliver_cached_events(struct nf_conn *ct); 73extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
75 74
76static inline void 75static inline void
77nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) 76nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
78{ 77{
78 struct net *net = nf_ct_net(ct);
79 struct nf_conntrack_ecache *e; 79 struct nf_conntrack_ecache *e;
80 80
81 if (nf_conntrack_event_cb == NULL) 81 if (net->ct.nf_conntrack_event_cb == NULL)
82 return; 82 return;
83 83
84 e = nf_ct_ecache_find(ct); 84 e = nf_ct_ecache_find(ct);
@@ -95,11 +95,12 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
95 int report) 95 int report)
96{ 96{
97 int ret = 0; 97 int ret = 0;
98 struct net *net = nf_ct_net(ct);
98 struct nf_ct_event_notifier *notify; 99 struct nf_ct_event_notifier *notify;
99 struct nf_conntrack_ecache *e; 100 struct nf_conntrack_ecache *e;
100 101
101 rcu_read_lock(); 102 rcu_read_lock();
102 notify = rcu_dereference(nf_conntrack_event_cb); 103 notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
103 if (notify == NULL) 104 if (notify == NULL)
104 goto out_unlock; 105 goto out_unlock;
105 106
@@ -164,9 +165,8 @@ struct nf_exp_event_notifier {
164 int (*fcn)(unsigned int events, struct nf_exp_event *item); 165 int (*fcn)(unsigned int events, struct nf_exp_event *item);
165}; 166};
166 167
167extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb; 168extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
168extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb); 169extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
169extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
170 170
171static inline void 171static inline void
172nf_ct_expect_event_report(enum ip_conntrack_expect_events event, 172nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -174,11 +174,12 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
174 u32 pid, 174 u32 pid,
175 int report) 175 int report)
176{ 176{
177 struct net *net = nf_ct_exp_net(exp);
177 struct nf_exp_event_notifier *notify; 178 struct nf_exp_event_notifier *notify;
178 struct nf_conntrack_ecache *e; 179 struct nf_conntrack_ecache *e;
179 180
180 rcu_read_lock(); 181 rcu_read_lock();
181 notify = rcu_dereference(nf_expect_event_cb); 182 notify = rcu_dereference(net->ct.nf_expect_event_cb);
182 if (notify == NULL) 183 if (notify == NULL)
183 goto out_unlock; 184 goto out_unlock;
184 185
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 0249399e51a7..7a911eca0f18 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -18,6 +18,8 @@ struct netns_ct {
18 struct hlist_nulls_head unconfirmed; 18 struct hlist_nulls_head unconfirmed;
19 struct hlist_nulls_head dying; 19 struct hlist_nulls_head dying;
20 struct ip_conntrack_stat __percpu *stat; 20 struct ip_conntrack_stat __percpu *stat;
21 struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
22 struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
21 int sysctl_events; 23 int sysctl_events;
22 unsigned int sysctl_events_retry_timeout; 24 unsigned int sysctl_events_retry_timeout;
23 int sysctl_acct; 25 int sysctl_acct;
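With the notifier pointers moved from file-scope globals into struct netns_ct, registration becomes per network namespace and both helpers grow a struct net argument. A hedged sketch of how a consumer adapts, using the signatures from the hunks above; the notifier, callback and pernet_operations names are hypothetical and the pernet registration itself (register_pernet_subsys()) is not shown:

static int example_ct_event(unsigned int events, struct nf_ct_event *item)
{
	/* Consume conntrack events delivered for this namespace. */
	return 0;
}

static struct nf_ct_event_notifier example_notifier = {
	.fcn	= example_ct_event,
};

static int __net_init example_net_init(struct net *net)
{
	/* One registration per namespace instead of one global callback. */
	return nf_conntrack_register_notifier(net, &example_notifier);
}

static void __net_exit example_net_exit(struct net *net)
{
	nf_conntrack_unregister_notifier(net, &example_notifier);
}

static struct pernet_operations example_net_ops = {
	.init	= example_net_init,
	.exit	= example_net_exit,
};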
diff --git a/include/net/red.h b/include/net/red.h
index 3319f16b3beb..b72a3b833936 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -116,7 +116,7 @@ struct red_parms {
116 u32 qR; /* Cached random number */ 116 u32 qR; /* Cached random number */
117 117
118 unsigned long qavg; /* Average queue length: A scaled */ 118 unsigned long qavg; /* Average queue length: A scaled */
119 psched_time_t qidlestart; /* Start of current idle period */ 119 ktime_t qidlestart; /* Start of current idle period */
120}; 120};
121 121
122static inline u32 red_rmask(u8 Plog) 122static inline u32 red_rmask(u8 Plog)
@@ -148,17 +148,17 @@ static inline void red_set_parms(struct red_parms *p,
148 148
149static inline int red_is_idling(struct red_parms *p) 149static inline int red_is_idling(struct red_parms *p)
150{ 150{
151 return p->qidlestart != PSCHED_PASTPERFECT; 151 return p->qidlestart.tv64 != 0;
152} 152}
153 153
154static inline void red_start_of_idle_period(struct red_parms *p) 154static inline void red_start_of_idle_period(struct red_parms *p)
155{ 155{
156 p->qidlestart = psched_get_time(); 156 p->qidlestart = ktime_get();
157} 157}
158 158
159static inline void red_end_of_idle_period(struct red_parms *p) 159static inline void red_end_of_idle_period(struct red_parms *p)
160{ 160{
161 p->qidlestart = PSCHED_PASTPERFECT; 161 p->qidlestart.tv64 = 0;
162} 162}
163 163
164static inline void red_restart(struct red_parms *p) 164static inline void red_restart(struct red_parms *p)
@@ -170,13 +170,10 @@ static inline void red_restart(struct red_parms *p)
170 170
171static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) 171static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
172{ 172{
173 psched_time_t now; 173 s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
174 long us_idle; 174 long us_idle = min_t(s64, delta, p->Scell_max);
175 int shift; 175 int shift;
176 176
177 now = psched_get_time();
178 us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
179
180 /* 177 /*
181 * The problem: ideally, average length queue recalcultion should 178 * The problem: ideally, average length queue recalcultion should
182 * be done over constant clock intervals. This is too expensive, so 179 * be done over constant clock intervals. This is too expensive, so
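The red_parms conversion is mechanical but easier to read out of the two-column view: an idle period now starts by storing ktime_get(), is declared over by zeroing tv64, and its length is read back as a microsecond delta clamped to Scell_max. The same bookkeeping as a small sketch; the names are hypothetical and the bound is whatever the caller passes in:

static ktime_t example_idle_start;

static void example_idle_begin(void)
{
	example_idle_start = ktime_get();	/* was psched_get_time() */
}

static void example_idle_end(void)
{
	example_idle_start.tv64 = 0;		/* was PSCHED_PASTPERFECT */
}

static long example_idle_us(s64 bound)
{
	/* Bounded microsecond delta, replacing psched_tdiff_bounded(). */
	s64 delta = ktime_us_delta(ktime_get(), example_idle_start);

	return min_t(s64, delta, bound);
}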
diff --git a/include/net/route.h b/include/net/route.h
index db7b3432f07c..91855d185b53 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -71,12 +71,12 @@ struct rtable {
71 struct fib_info *fi; /* for client ref to shared metrics */ 71 struct fib_info *fi; /* for client ref to shared metrics */
72}; 72};
73 73
74static inline bool rt_is_input_route(struct rtable *rt) 74static inline bool rt_is_input_route(const struct rtable *rt)
75{ 75{
76 return rt->rt_route_iif != 0; 76 return rt->rt_route_iif != 0;
77} 77}
78 78
79static inline bool rt_is_output_route(struct rtable *rt) 79static inline bool rt_is_output_route(const struct rtable *rt)
80{ 80{
81 return rt->rt_route_iif == 0; 81 return rt->rt_route_iif == 0;
82} 82}
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7f5fed3c89e1..6873c7dd9145 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -103,9 +103,10 @@ enum se_cmd_flags_table {
103 SCF_SCSI_NON_DATA_CDB = 0x00000040, 103 SCF_SCSI_NON_DATA_CDB = 0x00000040,
104 SCF_SCSI_CDB_EXCEPTION = 0x00000080, 104 SCF_SCSI_CDB_EXCEPTION = 0x00000080,
105 SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, 105 SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
106 SCF_SE_CMD_FAILED = 0x00000400, 106 SCF_FUA = 0x00000200,
107 SCF_SE_LUN_CMD = 0x00000800, 107 SCF_SE_LUN_CMD = 0x00000800,
108 SCF_SE_ALLOW_EOO = 0x00001000, 108 SCF_SE_ALLOW_EOO = 0x00001000,
109 SCF_BIDI = 0x00002000,
109 SCF_SENT_CHECK_CONDITION = 0x00004000, 110 SCF_SENT_CHECK_CONDITION = 0x00004000,
110 SCF_OVERFLOW_BIT = 0x00008000, 111 SCF_OVERFLOW_BIT = 0x00008000,
111 SCF_UNDERFLOW_BIT = 0x00010000, 112 SCF_UNDERFLOW_BIT = 0x00010000,
@@ -154,6 +155,7 @@ enum tcm_sense_reason_table {
154 TCM_CHECK_CONDITION_ABORT_CMD = 0x0d, 155 TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
155 TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e, 156 TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
156 TCM_CHECK_CONDITION_NOT_READY = 0x0f, 157 TCM_CHECK_CONDITION_NOT_READY = 0x0f,
158 TCM_RESERVATION_CONFLICT = 0x10,
157}; 159};
158 160
159struct se_obj { 161struct se_obj {
@@ -211,7 +213,6 @@ struct t10_alua_lu_gp {
211 u16 lu_gp_id; 213 u16 lu_gp_id;
212 int lu_gp_valid_id; 214 int lu_gp_valid_id;
213 u32 lu_gp_members; 215 u32 lu_gp_members;
214 atomic_t lu_gp_shutdown;
215 atomic_t lu_gp_ref_cnt; 216 atomic_t lu_gp_ref_cnt;
216 spinlock_t lu_gp_lock; 217 spinlock_t lu_gp_lock;
217 struct config_group lu_gp_group; 218 struct config_group lu_gp_group;
@@ -422,11 +423,9 @@ struct se_cmd {
422 int sam_task_attr; 423 int sam_task_attr;
423 /* Transport protocol dependent state, see transport_state_table */ 424 /* Transport protocol dependent state, see transport_state_table */
424 enum transport_state_table t_state; 425 enum transport_state_table t_state;
425 /* Transport specific error status */
426 int transport_error_status;
427 /* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */ 426 /* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
428 int check_release:1; 427 unsigned check_release:1;
429 int cmd_wait_set:1; 428 unsigned cmd_wait_set:1;
430 /* See se_cmd_flags_table */ 429 /* See se_cmd_flags_table */
431 u32 se_cmd_flags; 430 u32 se_cmd_flags;
432 u32 se_ordered_id; 431 u32 se_ordered_id;
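Besides dropping transport_error_status, the hunk turns the one-bit flags from int into unsigned. With gcc a plain int bitfield is signed (sparse flags this as a "dubious one-bit signed bitfield"), so a 1-bit field can only hold 0 and -1 and a later == 1 test never matches. A self-contained userspace illustration with a hypothetical struct:

#include <stdio.h>

struct flags {
	signed int   old_style:1;	/* behaves like the old "int check_release:1" under gcc */
	unsigned int new_style:1;	/* like the new "unsigned check_release:1" */
};

int main(void)
{
	struct flags f = { 0, 0 };

	f.old_style = 1;		/* 1 is not representable in one signed bit: stored as -1 */
	f.new_style = 1;

	printf("old=%d new=%u\n", f.old_style, f.new_style);		/* old=-1 new=1 */
	printf("old==1? %d new==1? %d\n", f.old_style == 1, f.new_style == 1);
	return 0;
}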
@@ -441,13 +440,10 @@ struct se_cmd {
441 /* Used for sense data */ 440 /* Used for sense data */
442 void *sense_buffer; 441 void *sense_buffer;
443 struct list_head se_delayed_node; 442 struct list_head se_delayed_node;
444 struct list_head se_ordered_node;
445 struct list_head se_lun_node; 443 struct list_head se_lun_node;
446 struct list_head se_qf_node; 444 struct list_head se_qf_node;
447 struct se_device *se_dev; 445 struct se_device *se_dev;
448 struct se_dev_entry *se_deve; 446 struct se_dev_entry *se_deve;
449 struct se_device *se_obj_ptr;
450 struct se_device *se_orig_obj_ptr;
451 struct se_lun *se_lun; 447 struct se_lun *se_lun;
452 /* Only used for internal passthrough and legacy TCM fabric modules */ 448 /* Only used for internal passthrough and legacy TCM fabric modules */
453 struct se_session *se_sess; 449 struct se_session *se_sess;
@@ -463,8 +459,6 @@ struct se_cmd {
463 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; 459 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
464 unsigned long long t_task_lba; 460 unsigned long long t_task_lba;
465 int t_tasks_failed; 461 int t_tasks_failed;
466 int t_tasks_fua;
467 bool t_tasks_bidi;
468 u32 t_tasks_sg_chained_no; 462 u32 t_tasks_sg_chained_no;
469 atomic_t t_fe_count; 463 atomic_t t_fe_count;
470 atomic_t t_se_count; 464 atomic_t t_se_count;
@@ -489,14 +483,6 @@ struct se_cmd {
489 483
490 struct work_struct work; 484 struct work_struct work;
491 485
492 /*
493 * Used for pre-registered fabric SGL passthrough WRITE and READ
494 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
495 * and other HW target mode fabric modules.
496 */
497 struct scatterlist *t_task_pt_sgl;
498 u32 t_task_pt_sgl_num;
499
500 struct scatterlist *t_data_sg; 486 struct scatterlist *t_data_sg;
501 unsigned int t_data_nents; 487 unsigned int t_data_nents;
502 struct scatterlist *t_bidi_data_sg; 488 struct scatterlist *t_bidi_data_sg;
@@ -562,7 +548,7 @@ struct se_node_acl {
562} ____cacheline_aligned; 548} ____cacheline_aligned;
563 549
564struct se_session { 550struct se_session {
565 int sess_tearing_down:1; 551 unsigned sess_tearing_down:1;
566 u64 sess_bin_isid; 552 u64 sess_bin_isid;
567 struct se_node_acl *se_node_acl; 553 struct se_node_acl *se_node_acl;
568 struct se_portal_group *se_tpg; 554 struct se_portal_group *se_tpg;
@@ -683,7 +669,6 @@ struct se_subsystem_dev {
683 struct t10_reservation t10_pr; 669 struct t10_reservation t10_pr;
684 spinlock_t se_dev_lock; 670 spinlock_t se_dev_lock;
685 void *se_dev_su_ptr; 671 void *se_dev_su_ptr;
686 struct list_head se_dev_node;
687 struct config_group se_dev_group; 672 struct config_group se_dev_group;
688 /* For T10 Reservations */ 673 /* For T10 Reservations */
689 struct config_group se_dev_pr_group; 674 struct config_group se_dev_pr_group;
@@ -692,9 +677,6 @@ struct se_subsystem_dev {
692} ____cacheline_aligned; 677} ____cacheline_aligned;
693 678
694struct se_device { 679struct se_device {
695 /* Set to 1 if thread is NOT sleeping on thread_sem */
696 u8 thread_active;
697 u8 dev_status_timer_flags;
698 /* RELATIVE TARGET PORT IDENTIFER Counter */ 680 /* RELATIVE TARGET PORT IDENTIFER Counter */
699 u16 dev_rpti_counter; 681 u16 dev_rpti_counter;
700 /* Used for SAM Task Attribute ordering */ 682 /* Used for SAM Task Attribute ordering */
@@ -719,14 +701,10 @@ struct se_device {
719 u64 write_bytes; 701 u64 write_bytes;
720 spinlock_t stats_lock; 702 spinlock_t stats_lock;
721 /* Active commands on this virtual SE device */ 703 /* Active commands on this virtual SE device */
722 atomic_t active_cmds;
723 atomic_t simple_cmds; 704 atomic_t simple_cmds;
724 atomic_t depth_left; 705 atomic_t depth_left;
725 atomic_t dev_ordered_id; 706 atomic_t dev_ordered_id;
726 atomic_t dev_tur_active;
727 atomic_t execute_tasks; 707 atomic_t execute_tasks;
728 atomic_t dev_status_thr_count;
729 atomic_t dev_hoq_count;
730 atomic_t dev_ordered_sync; 708 atomic_t dev_ordered_sync;
731 atomic_t dev_qf_count; 709 atomic_t dev_qf_count;
732 struct se_obj dev_obj; 710 struct se_obj dev_obj;
@@ -734,14 +712,9 @@ struct se_device {
734 struct se_obj dev_export_obj; 712 struct se_obj dev_export_obj;
735 struct se_queue_obj dev_queue_obj; 713 struct se_queue_obj dev_queue_obj;
736 spinlock_t delayed_cmd_lock; 714 spinlock_t delayed_cmd_lock;
737 spinlock_t ordered_cmd_lock;
738 spinlock_t execute_task_lock; 715 spinlock_t execute_task_lock;
739 spinlock_t state_task_lock;
740 spinlock_t dev_alua_lock;
741 spinlock_t dev_reservation_lock; 716 spinlock_t dev_reservation_lock;
742 spinlock_t dev_state_lock;
743 spinlock_t dev_status_lock; 717 spinlock_t dev_status_lock;
744 spinlock_t dev_status_thr_lock;
745 spinlock_t se_port_lock; 718 spinlock_t se_port_lock;
746 spinlock_t se_tmr_lock; 719 spinlock_t se_tmr_lock;
747 spinlock_t qf_cmd_lock; 720 spinlock_t qf_cmd_lock;
@@ -753,14 +726,10 @@ struct se_device {
753 struct t10_pr_registration *dev_pr_res_holder; 726 struct t10_pr_registration *dev_pr_res_holder;
754 struct list_head dev_sep_list; 727 struct list_head dev_sep_list;
755 struct list_head dev_tmr_list; 728 struct list_head dev_tmr_list;
756 struct timer_list dev_status_timer;
757 /* Pointer to descriptor for processing thread */ 729 /* Pointer to descriptor for processing thread */
758 struct task_struct *process_thread; 730 struct task_struct *process_thread;
759 pid_t process_thread_pid;
760 struct task_struct *dev_mgmt_thread;
761 struct work_struct qf_work_queue; 731 struct work_struct qf_work_queue;
762 struct list_head delayed_cmd_list; 732 struct list_head delayed_cmd_list;
763 struct list_head ordered_cmd_list;
764 struct list_head execute_task_list; 733 struct list_head execute_task_list;
765 struct list_head state_task_list; 734 struct list_head state_task_list;
766 struct list_head qf_cmd_list; 735 struct list_head qf_cmd_list;
@@ -771,8 +740,6 @@ struct se_device {
771 struct se_subsystem_api *transport; 740 struct se_subsystem_api *transport;
772 /* Linked list for struct se_hba struct se_device list */ 741 /* Linked list for struct se_hba struct se_device list */
773 struct list_head dev_list; 742 struct list_head dev_list;
774 /* Linked list for struct se_global->g_se_dev_list */
775 struct list_head g_se_dev_list;
776} ____cacheline_aligned; 743} ____cacheline_aligned;
777 744
778struct se_hba { 745struct se_hba {
@@ -834,7 +801,6 @@ struct se_port {
834 u32 sep_index; 801 u32 sep_index;
835 struct scsi_port_stats sep_stats; 802 struct scsi_port_stats sep_stats;
836 /* Used for ALUA Target Port Groups membership */ 803 /* Used for ALUA Target Port Groups membership */
837 atomic_t sep_tg_pt_gp_active;
838 atomic_t sep_tg_pt_secondary_offline; 804 atomic_t sep_tg_pt_secondary_offline;
839 /* Used for PR ALL_TG_PT=1 */ 805 /* Used for PR ALL_TG_PT=1 */
840 atomic_t sep_tg_pt_ref_cnt; 806 atomic_t sep_tg_pt_ref_cnt;
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index c16e9431dd01..dac4f2d859fd 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -10,29 +10,6 @@
10 10
11#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */ 11#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
12 12
13#define PYX_TRANSPORT_SENT_TO_TRANSPORT 0
14#define PYX_TRANSPORT_WRITE_PENDING 1
15
16#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1
17#define PYX_TRANSPORT_HBA_QUEUE_FULL -2
18#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3
19#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4
20#define PYX_TRANSPORT_INVALID_CDB_FIELD -5
21#define PYX_TRANSPORT_INVALID_PARAMETER_LIST -6
22#define PYX_TRANSPORT_LU_COMM_FAILURE -7
23#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8
24#define PYX_TRANSPORT_WRITE_PROTECTED -9
25#define PYX_TRANSPORT_RESERVATION_CONFLICT -10
26#define PYX_TRANSPORT_ILLEGAL_REQUEST -11
27#define PYX_TRANSPORT_USE_SENSE_REASON -12
28
29#ifndef SAM_STAT_RESERVATION_CONFLICT
30#define SAM_STAT_RESERVATION_CONFLICT 0x18
31#endif
32
33#define TRANSPORT_PLUGIN_FREE 0
34#define TRANSPORT_PLUGIN_REGISTERED 1
35
36#define TRANSPORT_PLUGIN_PHBA_PDEV 1 13#define TRANSPORT_PLUGIN_PHBA_PDEV 1
37#define TRANSPORT_PLUGIN_VHBA_PDEV 2 14#define TRANSPORT_PLUGIN_VHBA_PDEV 2
38#define TRANSPORT_PLUGIN_VHBA_VDEV 3 15#define TRANSPORT_PLUGIN_VHBA_VDEV 3
@@ -158,7 +135,6 @@ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
158extern int transport_handle_cdb_direct(struct se_cmd *); 135extern int transport_handle_cdb_direct(struct se_cmd *);
159extern int transport_generic_handle_cdb_map(struct se_cmd *); 136extern int transport_generic_handle_cdb_map(struct se_cmd *);
160extern int transport_generic_handle_data(struct se_cmd *); 137extern int transport_generic_handle_data(struct se_cmd *);
161extern void transport_new_cmd_failure(struct se_cmd *);
162extern int transport_generic_handle_tmr(struct se_cmd *); 138extern int transport_generic_handle_tmr(struct se_cmd *);
163extern bool target_stop_task(struct se_task *task, unsigned long *flags); 139extern bool target_stop_task(struct se_task *task, unsigned long *flags);
164extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, 140extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index b66ebb2032c6..378c7ed6760b 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -307,15 +307,8 @@ struct omap_dss_board_info {
307 void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask); 307 void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
308}; 308};
309 309
310#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS)
311/* Init with the board info */ 310/* Init with the board info */
312extern int omap_display_init(struct omap_dss_board_info *board_data); 311extern int omap_display_init(struct omap_dss_board_info *board_data);
313#else
314static inline int omap_display_init(struct omap_dss_board_info *board_data)
315{
316 return 0;
317}
318#endif
319 312
320struct omap_display_platform_data { 313struct omap_display_platform_data {
321 struct omap_dss_board_info *board_data; 314 struct omap_dss_board_info *board_data;
diff --git a/include/xen/platform_pci.h b/include/xen/platform_pci.h
index a785a3b0c8c7..438c256c274b 100644
--- a/include/xen/platform_pci.h
+++ b/include/xen/platform_pci.h
@@ -29,8 +29,7 @@
29static inline int xen_must_unplug_nics(void) { 29static inline int xen_must_unplug_nics(void) {
30#if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ 30#if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \
31 defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ 31 defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \
32 (defined(CONFIG_XEN_PLATFORM_PCI) || \ 32 defined(CONFIG_XEN_PVHVM)
33 defined(CONFIG_XEN_PLATFORM_PCI_MODULE))
34 return 1; 33 return 1;
35#else 34#else
36 return 0; 35 return 0;
@@ -40,8 +39,7 @@ static inline int xen_must_unplug_nics(void) {
40static inline int xen_must_unplug_disks(void) { 39static inline int xen_must_unplug_disks(void) {
41#if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ 40#if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \
42 defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ 41 defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \
43 (defined(CONFIG_XEN_PLATFORM_PCI) || \ 42 defined(CONFIG_XEN_PVHVM)
44 defined(CONFIG_XEN_PLATFORM_PCI_MODULE))
45 return 1; 43 return 1;
46#else 44#else
47 return 0; 45 return 0;
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 2e0ecfcc881d..5b4293d9819d 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1269,7 +1269,7 @@ void mq_clear_sbinfo(struct ipc_namespace *ns)
1269 1269
1270void mq_put_mnt(struct ipc_namespace *ns) 1270void mq_put_mnt(struct ipc_namespace *ns)
1271{ 1271{
1272 mntput(ns->mq_mnt); 1272 kern_unmount(ns->mq_mnt);
1273} 1273}
1274 1274
1275static int __init init_mqueue_fs(void) 1275static int __init init_mqueue_fs(void)
@@ -1291,11 +1291,9 @@ static int __init init_mqueue_fs(void)
1291 1291
1292 spin_lock_init(&mq_lock); 1292 spin_lock_init(&mq_lock);
1293 1293
1294 init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns); 1294 error = mq_init_ns(&init_ipc_ns);
1295 if (IS_ERR(init_ipc_ns.mq_mnt)) { 1295 if (error)
1296 error = PTR_ERR(init_ipc_ns.mq_mnt);
1297 goto out_filesystem; 1296 goto out_filesystem;
1298 }
1299 1297
1300 return 0; 1298 return 0;
1301 1299
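The two mqueue hunks go together: the initial namespace now sets its mount up through mq_init_ns() like any other ipc namespace, and tear-down uses kern_unmount(), the counterpart of the kern_mount_data() call visible in the removed lines, rather than a bare mntput(). A minimal sketch of that create/destroy pairing for an arbitrary internal filesystem; the filesystem type and names are hypothetical and its registration details are omitted:

static struct file_system_type example_fs_type = {
	.name	= "examplefs",
	/* .mount/.kill_sb and register_filesystem() omitted in this sketch */
};

static struct vfsmount *example_mnt;

static int __init example_init(void)
{
	example_mnt = kern_mount_data(&example_fs_type, NULL);
	return IS_ERR(example_mnt) ? PTR_ERR(example_mnt) : 0;
}

static void __exit example_exit(void)
{
	/* kern_unmount(), not mntput(), undoes kern_mount_data(). */
	kern_unmount(example_mnt);
}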
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index 8b5ce5d3f3ef..5652101cdac0 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -27,11 +27,6 @@ DEFINE_SPINLOCK(mq_lock);
27 */ 27 */
28struct ipc_namespace init_ipc_ns = { 28struct ipc_namespace init_ipc_ns = {
29 .count = ATOMIC_INIT(1), 29 .count = ATOMIC_INIT(1),
30#ifdef CONFIG_POSIX_MQUEUE
31 .mq_queues_max = DFLT_QUEUESMAX,
32 .mq_msg_max = DFLT_MSGMAX,
33 .mq_msgsize_max = DFLT_MSGSIZEMAX,
34#endif
35 .user_ns = &init_user_ns, 30 .user_ns = &init_user_ns,
36}; 31};
37 32
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 5e828a2ca8e6..213c0351dad8 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -153,6 +153,13 @@ static void freezer_destroy(struct cgroup_subsys *ss,
153 kfree(cgroup_freezer(cgroup)); 153 kfree(cgroup_freezer(cgroup));
154} 154}
155 155
156/* task is frozen or will freeze immediately when next it gets woken */
157static bool is_task_frozen_enough(struct task_struct *task)
158{
159 return frozen(task) ||
160 (task_is_stopped_or_traced(task) && freezing(task));
161}
162
156/* 163/*
157 * The call to cgroup_lock() in the freezer.state write method prevents 164 * The call to cgroup_lock() in the freezer.state write method prevents
158 * a write to that file racing against an attach, and hence the 165 * a write to that file racing against an attach, and hence the
@@ -231,7 +238,7 @@ static void update_if_frozen(struct cgroup *cgroup,
231 cgroup_iter_start(cgroup, &it); 238 cgroup_iter_start(cgroup, &it);
232 while ((task = cgroup_iter_next(cgroup, &it))) { 239 while ((task = cgroup_iter_next(cgroup, &it))) {
233 ntotal++; 240 ntotal++;
234 if (frozen(task)) 241 if (is_task_frozen_enough(task))
235 nfrozen++; 242 nfrozen++;
236 } 243 }
237 244
@@ -284,7 +291,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
284 while ((task = cgroup_iter_next(cgroup, &it))) { 291 while ((task = cgroup_iter_next(cgroup, &it))) {
285 if (!freeze_task(task, true)) 292 if (!freeze_task(task, true))
286 continue; 293 continue;
287 if (frozen(task)) 294 if (is_task_frozen_enough(task))
288 continue; 295 continue;
289 if (!freezing(task) && !freezer_should_skip(task)) 296 if (!freezing(task) && !freezer_should_skip(task))
290 num_cant_freeze_now++; 297 num_cant_freeze_now++;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0e8457da6f95..d3b9df5962c2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -185,6 +185,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
185static void update_context_time(struct perf_event_context *ctx); 185static void update_context_time(struct perf_event_context *ctx);
186static u64 perf_event_time(struct perf_event *event); 186static u64 perf_event_time(struct perf_event *event);
187 187
188static void ring_buffer_attach(struct perf_event *event,
189 struct ring_buffer *rb);
190
188void __weak perf_event_print_debug(void) { } 191void __weak perf_event_print_debug(void) { }
189 192
190extern __weak const char *perf_pmu_name(void) 193extern __weak const char *perf_pmu_name(void)
@@ -2171,9 +2174,10 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
2171 */ 2174 */
2172 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2175 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2173 2176
2174 perf_event_sched_in(cpuctx, ctx, task); 2177 if (ctx->nr_events)
2178 cpuctx->task_ctx = ctx;
2175 2179
2176 cpuctx->task_ctx = ctx; 2180 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2177 2181
2178 perf_pmu_enable(ctx->pmu); 2182 perf_pmu_enable(ctx->pmu);
2179 perf_ctx_unlock(cpuctx, ctx); 2183 perf_ctx_unlock(cpuctx, ctx);
@@ -3190,12 +3194,33 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
3190 struct ring_buffer *rb; 3194 struct ring_buffer *rb;
3191 unsigned int events = POLL_HUP; 3195 unsigned int events = POLL_HUP;
3192 3196
3197 /*
3198 * Race between perf_event_set_output() and perf_poll(): perf_poll()
3199 * grabs the rb reference but perf_event_set_output() overrides it.
3200 * Here is the timeline for two threads T1, T2:
3201 * t0: T1, rb = rcu_dereference(event->rb)
3202 * t1: T2, old_rb = event->rb
3203 * t2: T2, event->rb = new rb
3204 * t3: T2, ring_buffer_detach(old_rb)
3205 * t4: T1, ring_buffer_attach(rb1)
3206 * t5: T1, poll_wait(event->waitq)
3207 *
3208 * To avoid this problem, we grab mmap_mutex in perf_poll()
3209 * thereby ensuring that the assignment of the new ring buffer
3210 * and the detachment of the old buffer appear atomic to perf_poll()
3211 */
3212 mutex_lock(&event->mmap_mutex);
3213
3193 rcu_read_lock(); 3214 rcu_read_lock();
3194 rb = rcu_dereference(event->rb); 3215 rb = rcu_dereference(event->rb);
3195 if (rb) 3216 if (rb) {
3217 ring_buffer_attach(event, rb);
3196 events = atomic_xchg(&rb->poll, 0); 3218 events = atomic_xchg(&rb->poll, 0);
3219 }
3197 rcu_read_unlock(); 3220 rcu_read_unlock();
3198 3221
3222 mutex_unlock(&event->mmap_mutex);
3223
3199 poll_wait(file, &event->waitq, wait); 3224 poll_wait(file, &event->waitq, wait);
3200 3225
3201 return events; 3226 return events;
@@ -3496,6 +3521,49 @@ unlock:
3496 return ret; 3521 return ret;
3497} 3522}
3498 3523
3524static void ring_buffer_attach(struct perf_event *event,
3525 struct ring_buffer *rb)
3526{
3527 unsigned long flags;
3528
3529 if (!list_empty(&event->rb_entry))
3530 return;
3531
3532 spin_lock_irqsave(&rb->event_lock, flags);
3533 if (!list_empty(&event->rb_entry))
3534 goto unlock;
3535
3536 list_add(&event->rb_entry, &rb->event_list);
3537unlock:
3538 spin_unlock_irqrestore(&rb->event_lock, flags);
3539}
3540
3541static void ring_buffer_detach(struct perf_event *event,
3542 struct ring_buffer *rb)
3543{
3544 unsigned long flags;
3545
3546 if (list_empty(&event->rb_entry))
3547 return;
3548
3549 spin_lock_irqsave(&rb->event_lock, flags);
3550 list_del_init(&event->rb_entry);
3551 wake_up_all(&event->waitq);
3552 spin_unlock_irqrestore(&rb->event_lock, flags);
3553}
3554
3555static void ring_buffer_wakeup(struct perf_event *event)
3556{
3557 struct ring_buffer *rb;
3558
3559 rcu_read_lock();
3560 rb = rcu_dereference(event->rb);
3561 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
3562 wake_up_all(&event->waitq);
3563 }
3564 rcu_read_unlock();
3565}
3566
3499static void rb_free_rcu(struct rcu_head *rcu_head) 3567static void rb_free_rcu(struct rcu_head *rcu_head)
3500{ 3568{
3501 struct ring_buffer *rb; 3569 struct ring_buffer *rb;
@@ -3521,9 +3589,19 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3521 3589
3522static void ring_buffer_put(struct ring_buffer *rb) 3590static void ring_buffer_put(struct ring_buffer *rb)
3523{ 3591{
3592 struct perf_event *event, *n;
3593 unsigned long flags;
3594
3524 if (!atomic_dec_and_test(&rb->refcount)) 3595 if (!atomic_dec_and_test(&rb->refcount))
3525 return; 3596 return;
3526 3597
3598 spin_lock_irqsave(&rb->event_lock, flags);
3599 list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
3600 list_del_init(&event->rb_entry);
3601 wake_up_all(&event->waitq);
3602 }
3603 spin_unlock_irqrestore(&rb->event_lock, flags);
3604
3527 call_rcu(&rb->rcu_head, rb_free_rcu); 3605 call_rcu(&rb->rcu_head, rb_free_rcu);
3528} 3606}
3529 3607
@@ -3546,6 +3624,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
3546 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); 3624 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
3547 vma->vm_mm->pinned_vm -= event->mmap_locked; 3625 vma->vm_mm->pinned_vm -= event->mmap_locked;
3548 rcu_assign_pointer(event->rb, NULL); 3626 rcu_assign_pointer(event->rb, NULL);
3627 ring_buffer_detach(event, rb);
3549 mutex_unlock(&event->mmap_mutex); 3628 mutex_unlock(&event->mmap_mutex);
3550 3629
3551 ring_buffer_put(rb); 3630 ring_buffer_put(rb);
@@ -3700,7 +3779,7 @@ static const struct file_operations perf_fops = {
3700 3779
3701void perf_event_wakeup(struct perf_event *event) 3780void perf_event_wakeup(struct perf_event *event)
3702{ 3781{
3703 wake_up_all(&event->waitq); 3782 ring_buffer_wakeup(event);
3704 3783
3705 if (event->pending_kill) { 3784 if (event->pending_kill) {
3706 kill_fasync(&event->fasync, SIGIO, event->pending_kill); 3785 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
@@ -5822,6 +5901,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
5822 INIT_LIST_HEAD(&event->group_entry); 5901 INIT_LIST_HEAD(&event->group_entry);
5823 INIT_LIST_HEAD(&event->event_entry); 5902 INIT_LIST_HEAD(&event->event_entry);
5824 INIT_LIST_HEAD(&event->sibling_list); 5903 INIT_LIST_HEAD(&event->sibling_list);
5904 INIT_LIST_HEAD(&event->rb_entry);
5905
5825 init_waitqueue_head(&event->waitq); 5906 init_waitqueue_head(&event->waitq);
5826 init_irq_work(&event->pending, perf_pending_event); 5907 init_irq_work(&event->pending, perf_pending_event);
5827 5908
@@ -6028,6 +6109,8 @@ set:
6028 6109
6029 old_rb = event->rb; 6110 old_rb = event->rb;
6030 rcu_assign_pointer(event->rb, rb); 6111 rcu_assign_pointer(event->rb, rb);
6112 if (old_rb)
6113 ring_buffer_detach(event, old_rb);
6031 ret = 0; 6114 ret = 0;
6032unlock: 6115unlock:
6033 mutex_unlock(&event->mmap_mutex); 6116 mutex_unlock(&event->mmap_mutex);
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 09097dd8116c..64568a699375 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -22,6 +22,9 @@ struct ring_buffer {
22 local_t lost; /* nr records lost */ 22 local_t lost; /* nr records lost */
23 23
24 long watermark; /* wakeup watermark */ 24 long watermark; /* wakeup watermark */
25 /* poll crap */
26 spinlock_t event_lock;
27 struct list_head event_list;
25 28
26 struct perf_event_mmap_page *user_page; 29 struct perf_event_mmap_page *user_page;
27 void *data_pages[0]; 30 void *data_pages[0];
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index a2a29205cc0f..7f3011c6b57f 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -209,6 +209,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
209 rb->writable = 1; 209 rb->writable = 1;
210 210
211 atomic_set(&rb->refcount, 1); 211 atomic_set(&rb->refcount, 1);
212
213 INIT_LIST_HEAD(&rb->event_list);
214 spin_lock_init(&rb->event_lock);
212} 215}
213 216
214#ifndef CONFIG_PERF_USE_VMALLOC 217#ifndef CONFIG_PERF_USE_VMALLOC
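The thread running through the perf hunks above is that an event is now reachable from its ring buffer as well as the other way around: attach/detach maintain rb->event_list, perf_event_wakeup() walks it via ring_buffer_wakeup(), and ring_buffer_put() drains it so no waiter is left sleeping on a dead buffer. ring_buffer_attach() itself uses a common idiom, an unlocked "already attached" fast path re-checked under the lock. A small standalone C analogue of that idiom, with hypothetical names (a truly concurrent userspace version would make the fast-path flag atomic):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct waiter {
	struct waiter *next;
	bool attached;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *waiters;

static void attach_once(struct waiter *w)
{
	if (w->attached)			/* unlocked fast path */
		return;

	pthread_mutex_lock(&list_lock);
	if (!w->attached) {			/* re-check while holding the lock */
		w->next = waiters;
		waiters = w;
		w->attached = true;
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct waiter w = { .next = NULL, .attached = false };

	attach_once(&w);
	attach_once(&w);			/* second call is a no-op */
	printf("attached=%d\n", w.attached);
	return 0;
}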
diff --git a/kernel/fork.c b/kernel/fork.c
index ba0d17261329..da4a6a10d088 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -162,7 +162,6 @@ static void account_kernel_stack(struct thread_info *ti, int account)
162 162
163void free_task(struct task_struct *tsk) 163void free_task(struct task_struct *tsk)
164{ 164{
165 prop_local_destroy_single(&tsk->dirties);
166 account_kernel_stack(tsk->stack, -1); 165 account_kernel_stack(tsk->stack, -1);
167 free_thread_info(tsk->stack); 166 free_thread_info(tsk->stack);
168 rt_mutex_debug_task_free(tsk); 167 rt_mutex_debug_task_free(tsk);
@@ -274,10 +273,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
274 273
275 tsk->stack = ti; 274 tsk->stack = ti;
276 275
277 err = prop_local_init_single(&tsk->dirties);
278 if (err)
279 goto out;
280
281 setup_thread_stack(tsk, orig); 276 setup_thread_stack(tsk, orig);
282 clear_user_return_notifier(tsk); 277 clear_user_return_notifier(tsk);
283 clear_tsk_need_resched(tsk); 278 clear_tsk_need_resched(tsk);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 422e567eecf6..ae34bf51682b 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -885,10 +885,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
885 struct hrtimer_clock_base *base, 885 struct hrtimer_clock_base *base,
886 unsigned long newstate, int reprogram) 886 unsigned long newstate, int reprogram)
887{ 887{
888 struct timerqueue_node *next_timer;
888 if (!(timer->state & HRTIMER_STATE_ENQUEUED)) 889 if (!(timer->state & HRTIMER_STATE_ENQUEUED))
889 goto out; 890 goto out;
890 891
891 if (&timer->node == timerqueue_getnext(&base->active)) { 892 next_timer = timerqueue_getnext(&base->active);
893 timerqueue_del(&base->active, &timer->node);
894 if (&timer->node == next_timer) {
892#ifdef CONFIG_HIGH_RES_TIMERS 895#ifdef CONFIG_HIGH_RES_TIMERS
893 /* Reprogram the clock event device. if enabled */ 896 /* Reprogram the clock event device. if enabled */
894 if (reprogram && hrtimer_hres_active()) { 897 if (reprogram && hrtimer_hres_active()) {
@@ -901,7 +904,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
901 } 904 }
902#endif 905#endif
903 } 906 }
904 timerqueue_del(&base->active, &timer->node);
905 if (!timerqueue_getnext(&base->active)) 907 if (!timerqueue_getnext(&base->active))
906 base->cpu_base->active_bases &= ~(1 << base->index); 908 base->cpu_base->active_bases &= ~(1 << base->index);
907out: 909out:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 67ce837ae52c..1da999f5e746 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -623,8 +623,9 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
623 623
624static int irq_wait_for_interrupt(struct irqaction *action) 624static int irq_wait_for_interrupt(struct irqaction *action)
625{ 625{
626 set_current_state(TASK_INTERRUPTIBLE);
627
626 while (!kthread_should_stop()) { 628 while (!kthread_should_stop()) {
627 set_current_state(TASK_INTERRUPTIBLE);
628 629
629 if (test_and_clear_bit(IRQTF_RUNTHREAD, 630 if (test_and_clear_bit(IRQTF_RUNTHREAD,
630 &action->thread_flags)) { 631 &action->thread_flags)) {
@@ -632,7 +633,9 @@ static int irq_wait_for_interrupt(struct irqaction *action)
632 return 0; 633 return 0;
633 } 634 }
634 schedule(); 635 schedule();
636 set_current_state(TASK_INTERRUPTIBLE);
635 } 637 }
638 __set_current_state(TASK_RUNNING);
636 return -1; 639 return -1;
637} 640}
638 641
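The reordering matters because a kthread must mark itself TASK_INTERRUPTIBLE before it tests its wake-up condition; with the old placement a wake-up arriving between the test and set_current_state() could be lost and the thread would sleep on a pending request. Reduced to a skeleton, the pattern the fix establishes looks as follows; the work-pending predicate is hypothetical:

static bool example_work_pending(void)
{
	return false;				/* placeholder condition */
}

static int example_wait_for_work(void)
{
	set_current_state(TASK_INTERRUPTIBLE);	/* arm before testing */

	while (!kthread_should_stop()) {
		if (example_work_pending()) {
			__set_current_state(TASK_RUNNING);
			return 0;		/* work to do */
		}
		schedule();			/* state still set: really sleeps */
		set_current_state(TASK_INTERRUPTIBLE);	/* re-arm before re-testing */
	}
	__set_current_state(TASK_RUNNING);
	return -1;				/* asked to stop */
}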
@@ -1596,7 +1599,7 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1596 return -ENOMEM; 1599 return -ENOMEM;
1597 1600
1598 action->handler = handler; 1601 action->handler = handler;
1599 action->flags = IRQF_PERCPU; 1602 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1600 action->name = devname; 1603 action->name = devname;
1601 action->percpu_dev_id = dev_id; 1604 action->percpu_dev_id = dev_id;
1602 1605
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index aa57d5da18c1..dc813a948be2 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
84 */ 84 */
85 action = desc->action; 85 action = desc->action;
86 if (!action || !(action->flags & IRQF_SHARED) || 86 if (!action || !(action->flags & IRQF_SHARED) ||
87 (action->flags & __IRQF_TIMER) || !action->next) 87 (action->flags & __IRQF_TIMER) ||
88 (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
89 !action->next)
88 goto out; 90 goto out;
89 91
90 /* Already running on another processor */ 92 /* Already running on another processor */
@@ -115,7 +117,7 @@ static int misrouted_irq(int irq)
115 struct irq_desc *desc; 117 struct irq_desc *desc;
116 int i, ok = 0; 118 int i, ok = 0;
117 119
118 if (atomic_inc_return(&irq_poll_active) == 1) 120 if (atomic_inc_return(&irq_poll_active) != 1)
119 goto out; 121 goto out;
120 122
121 irq_poll_cpu = smp_processor_id(); 123 irq_poll_cpu = smp_processor_id();
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index bbdfe2a462a0..66ff7109f697 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -66,8 +66,9 @@ void jump_label_inc(struct jump_label_key *key)
66 return; 66 return;
67 67
68 jump_label_lock(); 68 jump_label_lock();
69 if (atomic_add_return(1, &key->enabled) == 1) 69 if (atomic_read(&key->enabled) == 0)
70 jump_label_update(key, JUMP_LABEL_ENABLE); 70 jump_label_update(key, JUMP_LABEL_ENABLE);
71 atomic_inc(&key->enabled);
71 jump_label_unlock(); 72 jump_label_unlock();
72} 73}
73 74
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e69434b070da..b2e08c932d91 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -44,6 +44,7 @@
44#include <linux/stringify.h> 44#include <linux/stringify.h>
45#include <linux/bitops.h> 45#include <linux/bitops.h>
46#include <linux/gfp.h> 46#include <linux/gfp.h>
47#include <linux/kmemcheck.h>
47 48
48#include <asm/sections.h> 49#include <asm/sections.h>
49 50
@@ -2948,7 +2949,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
2948void lockdep_init_map(struct lockdep_map *lock, const char *name, 2949void lockdep_init_map(struct lockdep_map *lock, const char *name,
2949 struct lock_class_key *key, int subclass) 2950 struct lock_class_key *key, int subclass)
2950{ 2951{
2951 memset(lock, 0, sizeof(*lock)); 2952 int i;
2953
2954 kmemcheck_mark_initialized(lock, sizeof(*lock));
2955
2956 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
2957 lock->class_cache[i] = NULL;
2952 2958
2953#ifdef CONFIG_LOCK_STAT 2959#ifdef CONFIG_LOCK_STAT
2954 lock->cpu = raw_smp_processor_id(); 2960 lock->cpu = raw_smp_processor_id();
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index b4511b6d3ef9..a6b0503574ee 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -55,6 +55,8 @@ enum {
55 55
56static int hibernation_mode = HIBERNATION_SHUTDOWN; 56static int hibernation_mode = HIBERNATION_SHUTDOWN;
57 57
58static bool freezer_test_done;
59
58static const struct platform_hibernation_ops *hibernation_ops; 60static const struct platform_hibernation_ops *hibernation_ops;
59 61
60/** 62/**
@@ -345,11 +347,24 @@ int hibernation_snapshot(int platform_mode)
345 347
346 error = freeze_kernel_threads(); 348 error = freeze_kernel_threads();
347 if (error) 349 if (error)
348 goto Close; 350 goto Cleanup;
351
352 if (hibernation_test(TEST_FREEZER) ||
353 hibernation_testmode(HIBERNATION_TESTPROC)) {
354
355 /*
356 * Indicate to the caller that we are returning due to a
357 * successful freezer test.
358 */
359 freezer_test_done = true;
360 goto Cleanup;
361 }
349 362
350 error = dpm_prepare(PMSG_FREEZE); 363 error = dpm_prepare(PMSG_FREEZE);
351 if (error) 364 if (error) {
352 goto Complete_devices; 365 dpm_complete(msg);
366 goto Cleanup;
367 }
353 368
354 suspend_console(); 369 suspend_console();
355 pm_restrict_gfp_mask(); 370 pm_restrict_gfp_mask();
@@ -378,8 +393,6 @@ int hibernation_snapshot(int platform_mode)
378 pm_restore_gfp_mask(); 393 pm_restore_gfp_mask();
379 394
380 resume_console(); 395 resume_console();
381
382 Complete_devices:
383 dpm_complete(msg); 396 dpm_complete(msg);
384 397
385 Close: 398 Close:
@@ -389,6 +402,10 @@ int hibernation_snapshot(int platform_mode)
389 Recover_platform: 402 Recover_platform:
390 platform_recover(platform_mode); 403 platform_recover(platform_mode);
391 goto Resume_devices; 404 goto Resume_devices;
405
406 Cleanup:
407 swsusp_free();
408 goto Close;
392} 409}
393 410
394/** 411/**
@@ -641,15 +658,13 @@ int hibernate(void)
641 if (error) 658 if (error)
642 goto Finish; 659 goto Finish;
643 660
644 if (hibernation_test(TEST_FREEZER))
645 goto Thaw;
646
647 if (hibernation_testmode(HIBERNATION_TESTPROC))
648 goto Thaw;
649
650 error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM); 661 error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
651 if (error) 662 if (error)
652 goto Thaw; 663 goto Thaw;
664 if (freezer_test_done) {
665 freezer_test_done = false;
666 goto Thaw;
667 }
653 668
654 if (in_suspend) { 669 if (in_suspend) {
655 unsigned int flags = 0; 670 unsigned int flags = 0;
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 71f49fe4377e..36e0f0903c32 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -290,13 +290,14 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
290 if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) 290 if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
291 break; 291 break;
292 } 292 }
293 if (state < PM_SUSPEND_MAX && *s) 293 if (state < PM_SUSPEND_MAX && *s) {
294 error = enter_state(state); 294 error = enter_state(state);
295 if (error) { 295 if (error) {
296 suspend_stats.fail++; 296 suspend_stats.fail++;
297 dpm_save_failed_errno(error); 297 dpm_save_failed_errno(error);
298 } else 298 } else
299 suspend_stats.success++; 299 suspend_stats.success++;
300 }
300#endif 301#endif
301 302
302 Exit: 303 Exit:
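The hunk only adds braces, but that is the bug: without them the suspend statistics were updated on every write, including writes that never reached enter_state() because no valid state matched, since error keeps its preset value in that case. A two-branch illustration of the pitfall in plain C with hypothetical names:

#include <stdio.h>

static int do_transition(void) { return 0; }	/* stand-in for the real state change */

int main(void)
{
	int requested = 0;			/* no valid state parsed from the input */
	int error = -1;				/* sticky default error */
	int fail = 0, success = 0;

	if (requested)
		error = do_transition();
	if (error)				/* runs even though nothing was attempted */
		fail++;
	else
		success++;

	printf("fail=%d success=%d\n", fail, success);	/* fail=1 despite no attempt */
	return 0;
}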
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 56db75147186..995e3bd3417b 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -70,6 +70,7 @@ static struct pm_qos_constraints cpu_dma_constraints = {
70}; 70};
71static struct pm_qos_object cpu_dma_pm_qos = { 71static struct pm_qos_object cpu_dma_pm_qos = {
72 .constraints = &cpu_dma_constraints, 72 .constraints = &cpu_dma_constraints,
73 .name = "cpu_dma_latency",
73}; 74};
74 75
75static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); 76static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
diff --git a/kernel/printk.c b/kernel/printk.c
index 1455a0d4eedd..7982a0a841ea 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1293,10 +1293,11 @@ again:
1293 raw_spin_lock(&logbuf_lock); 1293 raw_spin_lock(&logbuf_lock);
1294 if (con_start != log_end) 1294 if (con_start != log_end)
1295 retry = 1; 1295 retry = 1;
1296 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
1297
1296 if (retry && console_trylock()) 1298 if (retry && console_trylock())
1297 goto again; 1299 goto again;
1298 1300
1299 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
1300 if (wake_klogd) 1301 if (wake_klogd)
1301 wake_up_klogd(); 1302 wake_up_klogd();
1302} 1303}
diff --git a/kernel/sched.c b/kernel/sched.c
index 0e9344a71be3..d6b149ccf925 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
71#include <linux/ctype.h> 71#include <linux/ctype.h>
72#include <linux/ftrace.h> 72#include <linux/ftrace.h>
73#include <linux/slab.h> 73#include <linux/slab.h>
74#include <linux/init_task.h>
74 75
75#include <asm/tlb.h> 76#include <asm/tlb.h>
76#include <asm/irq_regs.h> 77#include <asm/irq_regs.h>
@@ -4810,6 +4811,9 @@ EXPORT_SYMBOL(wait_for_completion);
4810 * This waits for either a completion of a specific task to be signaled or for a 4811 * This waits for either a completion of a specific task to be signaled or for a
4811 * specified timeout to expire. The timeout is in jiffies. It is not 4812 * specified timeout to expire. The timeout is in jiffies. It is not
4812 * interruptible. 4813 * interruptible.
4814 *
4815 * The return value is 0 if timed out, and positive (at least 1, or number of
4816 * jiffies left till timeout) if completed.
4813 */ 4817 */
4814unsigned long __sched 4818unsigned long __sched
4815wait_for_completion_timeout(struct completion *x, unsigned long timeout) 4819wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -4824,6 +4828,8 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
4824 * 4828 *
4825 * This waits for completion of a specific task to be signaled. It is 4829 * This waits for completion of a specific task to be signaled. It is
4826 * interruptible. 4830 * interruptible.
4831 *
4832 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
4827 */ 4833 */
4828int __sched wait_for_completion_interruptible(struct completion *x) 4834int __sched wait_for_completion_interruptible(struct completion *x)
4829{ 4835{
@@ -4841,6 +4847,9 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
4841 * 4847 *
4842 * This waits for either a completion of a specific task to be signaled or for a 4848 * This waits for either a completion of a specific task to be signaled or for a
4843 * specified timeout to expire. It is interruptible. The timeout is in jiffies. 4849 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4850 *
4851 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
4852 * positive (at least 1, or number of jiffies left till timeout) if completed.
4844 */ 4853 */
4845long __sched 4854long __sched
4846wait_for_completion_interruptible_timeout(struct completion *x, 4855wait_for_completion_interruptible_timeout(struct completion *x,
@@ -4856,6 +4865,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
4856 * 4865 *
4857 * This waits to be signaled for completion of a specific task. It can be 4866 * This waits to be signaled for completion of a specific task. It can be
4858 * interrupted by a kill signal. 4867 * interrupted by a kill signal.
4868 *
4869 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
4859 */ 4870 */
4860int __sched wait_for_completion_killable(struct completion *x) 4871int __sched wait_for_completion_killable(struct completion *x)
4861{ 4872{
@@ -4874,6 +4885,9 @@ EXPORT_SYMBOL(wait_for_completion_killable);
4874 * This waits for either a completion of a specific task to be 4885 * This waits for either a completion of a specific task to be
4875 * signaled or for a specified timeout to expire. It can be 4886 * signaled or for a specified timeout to expire. It can be
4876 * interrupted by a kill signal. The timeout is in jiffies. 4887 * interrupted by a kill signal. The timeout is in jiffies.
4888 *
4889 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
4890 * positive (at least 1, or number of jiffies left till timeout) if completed.
4877 */ 4891 */
4878long __sched 4892long __sched
4879wait_for_completion_killable_timeout(struct completion *x, 4893wait_for_completion_killable_timeout(struct completion *x,
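The added kerneldoc pins down the return conventions: the plain *_timeout variants return 0 on timeout and the remaining jiffies (at least 1) on completion, while the interruptible and killable variants may also return -ERESTARTSYS. Since the timeout variants hand back a count rather than an errno, a short hedged usage sketch (the 100 ms budget and function name are illustrative):

static int example_wait(struct completion *done)
{
	unsigned long left;

	left = wait_for_completion_timeout(done, msecs_to_jiffies(100));
	if (!left)
		return -ETIMEDOUT;	/* 0 means the timeout expired */

	return 0;			/* >= 1: completed, 'left' jiffies were left */
}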
@@ -6099,6 +6113,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
6099 */ 6113 */
6100 idle->sched_class = &idle_sched_class; 6114 idle->sched_class = &idle_sched_class;
6101 ftrace_graph_init_idle_task(idle, cpu); 6115 ftrace_graph_init_idle_task(idle, cpu);
6116#if defined(CONFIG_SMP)
6117 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
6118#endif
6102} 6119}
6103 6120
6104/* 6121/*
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5c9e67923b7c..a78ed2736ba7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -772,19 +772,32 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
772 list_del_leaf_cfs_rq(cfs_rq); 772 list_del_leaf_cfs_rq(cfs_rq);
773} 773}
774 774
775static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
776{
777 long tg_weight;
778
779 /*
780 * Use this CPU's actual weight instead of the last load_contribution
781 * to gain a more accurate current total weight. See
782 * update_cfs_rq_load_contribution().
783 */
784 tg_weight = atomic_read(&tg->load_weight);
785 tg_weight -= cfs_rq->load_contribution;
786 tg_weight += cfs_rq->load.weight;
787
788 return tg_weight;
789}
790
775static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) 791static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
776{ 792{
777 long load_weight, load, shares; 793 long tg_weight, load, shares;
778 794
795 tg_weight = calc_tg_weight(tg, cfs_rq);
779 load = cfs_rq->load.weight; 796 load = cfs_rq->load.weight;
780 797
781 load_weight = atomic_read(&tg->load_weight);
782 load_weight += load;
783 load_weight -= cfs_rq->load_contribution;
784
785 shares = (tg->shares * load); 798 shares = (tg->shares * load);
786 if (load_weight) 799 if (tg_weight)
787 shares /= load_weight; 800 shares /= tg_weight;
788 801
789 if (shares < MIN_SHARES) 802 if (shares < MIN_SHARES)
790 shares = MIN_SHARES; 803 shares = MIN_SHARES;
@@ -1743,7 +1756,7 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1743 1756
1744static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 1757static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1745{ 1758{
1746 if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running) 1759 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
1747 return; 1760 return;
1748 1761
1749 __return_cfs_rq_runtime(cfs_rq); 1762 __return_cfs_rq_runtime(cfs_rq);
@@ -2036,36 +2049,100 @@ static void task_waking_fair(struct task_struct *p)
2036 * Adding load to a group doesn't make a group heavier, but can cause movement 2049 * Adding load to a group doesn't make a group heavier, but can cause movement
2037 * of group shares between cpus. Assuming the shares were perfectly aligned one 2050 * of group shares between cpus. Assuming the shares were perfectly aligned one
2038 * can calculate the shift in shares. 2051 * can calculate the shift in shares.
2052 *
2053 * Calculate the effective load difference if @wl is added (subtracted) to @tg
2054 * on this @cpu and results in a total addition (subtraction) of @wg to the
2055 * total group weight.
2056 *
2057 * Given a runqueue weight distribution (rw_i) we can compute a shares
2058 * distribution (s_i) using:
2059 *
2060 * s_i = rw_i / \Sum rw_j (1)
2061 *
2062 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
2063 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
2064 * shares distribution (s_i):
2065 *
2066 * rw_i = { 2, 4, 1, 0 }
2067 * s_i = { 2/7, 4/7, 1/7, 0 }
2068 *
2069 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
2070 * task used to run on and the CPU the waker is running on), we need to
2071 * compute the effect of waking a task on either CPU and, in case of a sync
2072 * wakeup, compute the effect of the current task going to sleep.
2073 *
2074 * So for a change of @wl to the local @cpu with an overall group weight change
2075 * of @wl we can compute the new shares distribution (s'_i) using:
2076 *
2077 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
2078 *
2079 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
2080 * differences in waking a task to CPU 0. The additional task changes the
2081 * weight and shares distributions like:
2082 *
2083 * rw'_i = { 3, 4, 1, 0 }
2084 * s'_i = { 3/8, 4/8, 1/8, 0 }
2085 *
2086 * We can then compute the difference in effective weight by using:
2087 *
2088 * dw_i = S * (s'_i - s_i) (3)
2089 *
2090 * Where 'S' is the group weight as seen by its parent.
2091 *
2092 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
2093 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
2094 * 4/7) times the weight of the group.
2039 */ 2095 */
2040static long effective_load(struct task_group *tg, int cpu, long wl, long wg) 2096static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
2041{ 2097{
2042 struct sched_entity *se = tg->se[cpu]; 2098 struct sched_entity *se = tg->se[cpu];
2043 2099
2044 if (!tg->parent) 2100 if (!tg->parent) /* the trivial, non-cgroup case */
2045 return wl; 2101 return wl;
2046 2102
2047 for_each_sched_entity(se) { 2103 for_each_sched_entity(se) {
2048 long lw, w; 2104 long w, W;
2049 2105
2050 tg = se->my_q->tg; 2106 tg = se->my_q->tg;
2051 w = se->my_q->load.weight;
2052 2107
2053 /* use this cpu's instantaneous contribution */ 2108 /*
2054 lw = atomic_read(&tg->load_weight); 2109 * W = @wg + \Sum rw_j
2055 lw -= se->my_q->load_contribution; 2110 */
2056 lw += w + wg; 2111 W = wg + calc_tg_weight(tg, se->my_q);
2057 2112
2058 wl += w; 2113 /*
2114 * w = rw_i + @wl
2115 */
2116 w = se->my_q->load.weight + wl;
2059 2117
2060 if (lw > 0 && wl < lw) 2118 /*
2061 wl = (wl * tg->shares) / lw; 2119 * wl = S * s'_i; see (2)
2120 */
2121 if (W > 0 && w < W)
2122 wl = (w * tg->shares) / W;
2062 else 2123 else
2063 wl = tg->shares; 2124 wl = tg->shares;
2064 2125
2065 /* zero point is MIN_SHARES */ 2126 /*
2127 * Per the above, wl is the new se->load.weight value; since
2128 * those are clipped to [MIN_SHARES, ...) do so now. See
2129 * calc_cfs_shares().
2130 */
2066 if (wl < MIN_SHARES) 2131 if (wl < MIN_SHARES)
2067 wl = MIN_SHARES; 2132 wl = MIN_SHARES;
2133
2134 /*
2135 * wl = dw_i = S * (s'_i - s_i); see (3)
2136 */
2068 wl -= se->load.weight; 2137 wl -= se->load.weight;
2138
2139 /*
2140 * Recursively apply this logic to all parent groups to compute
2141 * the final effective load change on the root group. Since
2142 * only the @tg group gets extra weight, all parent groups can
2143 * only redistribute existing shares. @wl is the shift in shares
2144 * resulting from this level per the above.
2145 */
2069 wg = 0; 2146 wg = 0;
2070 } 2147 }
2071 2148
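The new comment walks the weights rw_i = {2, 4, 1, 0} through equations (1)-(3). The arithmetic is easy to verify outside the kernel; the standalone program below reproduces the quoted 5/56 and -4/56 effective-load shifts, treating the parent-visible group weight S as 1 so the output is the fractional change in shares:

#include <stdio.h>

int main(void)
{
	double rw[4] = { 2, 4, 1, 0 };	/* per-CPU runqueue weights from the comment */
	double wl = 1, wg = 1;		/* one extra weight-1 task woken on CPU 0 */
	double sum = rw[0] + rw[1] + rw[2] + rw[3];
	int i;

	for (i = 0; i < 2; i++) {
		double s_old = rw[i] / sum;				/* (1) */
		double s_new = (rw[i] + (i == 0 ? wl : 0)) / (sum + wg);/* (2) */
		double dw = s_new - s_old;				/* (3), S = 1 */

		printf("cpu%d: s=%f -> s'=%f, dw=%+f\n", i, s_old, s_new, dw);
	}
	/* Prints dw=+0.089286 (= 5/56) for cpu0 and dw=-0.071429 (= -4/56) for cpu1. */
	return 0;
}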
@@ -2249,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
2249 int cpu = smp_processor_id(); 2326 int cpu = smp_processor_id();
2250 int prev_cpu = task_cpu(p); 2327 int prev_cpu = task_cpu(p);
2251 struct sched_domain *sd; 2328 struct sched_domain *sd;
2252 int i; 2329 struct sched_group *sg;
2330 int i, smt = 0;
2253 2331
2254 /* 2332 /*
2255 * If the task is going to be woken-up on this cpu and if it is 2333 * If the task is going to be woken-up on this cpu and if it is
@@ -2269,25 +2347,38 @@ static int select_idle_sibling(struct task_struct *p, int target)
2269 * Otherwise, iterate the domains and find an elegible idle cpu. 2347 * Otherwise, iterate the domains and find an elegible idle cpu.
2270 */ 2348 */
2271 rcu_read_lock(); 2349 rcu_read_lock();
2350again:
2272 for_each_domain(target, sd) { 2351 for_each_domain(target, sd) {
2273 if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) 2352 if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
2274 break; 2353 continue;
2275 2354
2276 for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) { 2355 if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
2277 if (idle_cpu(i)) { 2356 if (!smt) {
2278 target = i; 2357 smt = 1;
2279 break; 2358 goto again;
2280 } 2359 }
2360 break;
2281 } 2361 }
2282 2362
2283 /* 2363 sg = sd->groups;
2284 * Lets stop looking for an idle sibling when we reached 2364 do {
2285 * the domain that spans the current cpu and prev_cpu. 2365 if (!cpumask_intersects(sched_group_cpus(sg),
2286 */ 2366 tsk_cpus_allowed(p)))
2287 if (cpumask_test_cpu(cpu, sched_domain_span(sd)) && 2367 goto next;
2288 cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) 2368
2289 break; 2369 for_each_cpu(i, sched_group_cpus(sg)) {
2370 if (!idle_cpu(i))
2371 goto next;
2372 }
2373
2374 target = cpumask_first_and(sched_group_cpus(sg),
2375 tsk_cpus_allowed(p));
2376 goto done;
2377next:
2378 sg = sg->next;
2379 } while (sg != sd->groups);
2290 } 2380 }
2381done:
2291 rcu_read_unlock(); 2382 rcu_read_unlock();
2292 2383
2293 return target; 2384 return target;
@@ -3511,7 +3602,7 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
3511} 3602}
3512 3603
3513/** 3604/**
3514 * update_sd_lb_stats - Update sched_group's statistics for load balancing. 3605 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
3515 * @sd: sched_domain whose statistics are to be updated. 3606 * @sd: sched_domain whose statistics are to be updated.
3516 * @this_cpu: Cpu for which load balance is currently performed. 3607 * @this_cpu: Cpu for which load balance is currently performed.
3517 * @idle: Idle status of this_cpu 3608 * @idle: Idle status of this_cpu
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index efa0a7b75dde..84802245abd2 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -67,3 +67,4 @@ SCHED_FEAT(NONTASK_POWER, 1)
67SCHED_FEAT(TTWU_QUEUE, 1) 67SCHED_FEAT(TTWU_QUEUE, 1)
68 68
69SCHED_FEAT(FORCE_SD_OVERLAP, 0) 69SCHED_FEAT(FORCE_SD_OVERLAP, 0)
70SCHED_FEAT(RT_RUNTIME_SHARE, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 056cbd2e2a27..583a1368afe6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -560,6 +560,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
560{ 560{
561 int more = 0; 561 int more = 0;
562 562
563 if (!sched_feat(RT_RUNTIME_SHARE))
564 return more;
565
563 if (rt_rq->rt_time > rt_rq->rt_runtime) { 566 if (rt_rq->rt_time > rt_rq->rt_runtime) {
564 raw_spin_unlock(&rt_rq->rt_runtime_lock); 567 raw_spin_unlock(&rt_rq->rt_runtime_lock);
565 more = do_balance_runtime(rt_rq); 568 more = do_balance_runtime(rt_rq);
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index c436e790b21b..8a46f5d64504 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
195 struct alarm *alarm; 195 struct alarm *alarm;
196 ktime_t expired = next->expires; 196 ktime_t expired = next->expires;
197 197
198 if (expired.tv64 >= now.tv64) 198 if (expired.tv64 > now.tv64)
199 break; 199 break;
200 200
201 alarm = container_of(next, struct alarm, node); 201 alarm = container_of(next, struct alarm, node);
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 1ecd6ba36d6c..c4eb71c8b2ea 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -387,6 +387,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
387 * released list and do a notify add later. 387 * released list and do a notify add later.
388 */ 388 */
389 if (old) { 389 if (old) {
390 old->event_handler = clockevents_handle_noop;
390 clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); 391 clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
391 list_del(&old->list); 392 list_del(&old->list);
392 list_add(&old->list, &clockevents_released); 393 list_add(&old->list, &clockevents_released);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index cf52fda2e096..da2f760e780c 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -492,6 +492,22 @@ void clocksource_touch_watchdog(void)
492} 492}
493 493
494/** 494/**
495 * clocksource_max_adjustment - Returns max adjustment amount
496 * @cs: Pointer to clocksource
497 *
498 */
499static u32 clocksource_max_adjustment(struct clocksource *cs)
500{
501 u64 ret;
502 /*
503 * We won't try to correct for more than 11% adjustments (110,000 ppm).
504 */
505 ret = (u64)cs->mult * 11;
506 do_div(ret,100);
507 return (u32)ret;
508}
509
510/**
495 * clocksource_max_deferment - Returns max time the clocksource can be deferred 511 * clocksource_max_deferment - Returns max time the clocksource can be deferred
496 * @cs: Pointer to clocksource 512 * @cs: Pointer to clocksource
497 * 513 *
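
The new clocksource_max_adjustment() above is simply 11% of cs->mult, computed with a 64-bit intermediate so the multiply cannot overflow. A standalone illustration of that calculation plus the overflow check that clocksource_register() later performs against it; plain C with do_div() replaced by ordinary division, and the sample mult value is arbitrary.

#include <stdio.h>
#include <stdint.h>

/* 11% of mult, computed in 64 bits to avoid intermediate overflow */
static uint32_t max_adjustment(uint32_t mult)
{
    uint64_t ret = (uint64_t)mult * 11;
    return (uint32_t)(ret / 100);
}

int main(void)
{
    uint32_t mult = 4000000000u;   /* large enough that mult + maxadj wraps a u32 */
    uint32_t maxadj = max_adjustment(mult);

    printf("maxadj = %u\n", maxadj);
    /* same test as the WARN_ONCE in clocksource_register(): the sum must not wrap */
    if (mult + maxadj < mult)
        printf("mult would overflow on an 11%% adjustment\n");
    return 0;
}
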
@@ -503,25 +519,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
503 /* 519 /*
504 * Calculate the maximum number of cycles that we can pass to the 520 * Calculate the maximum number of cycles that we can pass to the
505 * cyc2ns function without overflowing a 64-bit signed result. The 521 * cyc2ns function without overflowing a 64-bit signed result. The
506 * maximum number of cycles is equal to ULLONG_MAX/cs->mult which 522 * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
507 * is equivalent to the below. 523 * which is equivalent to the below.
508 * max_cycles < (2^63)/cs->mult 524 * max_cycles < (2^63)/(cs->mult + cs->maxadj)
509 * max_cycles < 2^(log2((2^63)/cs->mult)) 525 * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
510 * max_cycles < 2^(log2(2^63) - log2(cs->mult)) 526 * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
511 * max_cycles < 2^(63 - log2(cs->mult)) 527 * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
512 * max_cycles < 1 << (63 - log2(cs->mult)) 528 * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
513 * Please note that we add 1 to the result of the log2 to account for 529 * Please note that we add 1 to the result of the log2 to account for
514 * any rounding errors, ensure the above inequality is satisfied and 530 * any rounding errors, ensure the above inequality is satisfied and
515 * no overflow will occur. 531 * no overflow will occur.
516 */ 532 */
517 max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1)); 533 max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
518 534
519 /* 535 /*
520 * The actual maximum number of cycles we can defer the clocksource is 536 * The actual maximum number of cycles we can defer the clocksource is
521 * determined by the minimum of max_cycles and cs->mask. 537 * determined by the minimum of max_cycles and cs->mask.
538 * Note: Here we subtract the maxadj to make sure we don't sleep for
539 * too long if there's a large negative adjustment.
522 */ 540 */
523 max_cycles = min_t(u64, max_cycles, (u64) cs->mask); 541 max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
524 max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift); 542 max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
543 cs->shift);
525 544
526 /* 545 /*
527 * To ensure that the clocksource does not wrap whilst we are idle, 546 * To ensure that the clocksource does not wrap whilst we are idle,
@@ -529,7 +548,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
529 * note a margin of 12.5% is used because this can be computed with 548 * note a margin of 12.5% is used because this can be computed with
530 * a shift, versus say 10% which would require division. 549 * a shift, versus say 10% which would require division.
531 */ 550 */
532 return max_nsecs - (max_nsecs >> 5); 551 return max_nsecs - (max_nsecs >> 3);
533} 552}
534 553
535#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET 554#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
@@ -640,7 +659,6 @@ static void clocksource_enqueue(struct clocksource *cs)
640void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) 659void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
641{ 660{
642 u64 sec; 661 u64 sec;
643
644 /* 662 /*
645 * Calc the maximum number of seconds which we can run before 663 * Calc the maximum number of seconds which we can run before
646 * wrapping around. For clocksources which have a mask > 32bit 664 * wrapping around. For clocksources which have a mask > 32bit
@@ -651,7 +669,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
651 * ~ 0.06ppm granularity for NTP. We apply the same 12.5% 669 * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
652 * margin as we do in clocksource_max_deferment() 670 * margin as we do in clocksource_max_deferment()
653 */ 671 */
654 sec = (cs->mask - (cs->mask >> 5)); 672 sec = (cs->mask - (cs->mask >> 3));
655 do_div(sec, freq); 673 do_div(sec, freq);
656 do_div(sec, scale); 674 do_div(sec, scale);
657 if (!sec) 675 if (!sec)
@@ -661,6 +679,20 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
661 679
662 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, 680 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
663 NSEC_PER_SEC / scale, sec * scale); 681 NSEC_PER_SEC / scale, sec * scale);
682
683 /*
684 * For clocksources with large mults, guard against overflow:
685 * since mult may be adjusted by NTP, add a safety margin.
686 *
687 */
688 cs->maxadj = clocksource_max_adjustment(cs);
689 while ((cs->mult + cs->maxadj < cs->mult)
690 || (cs->mult - cs->maxadj > cs->mult)) {
691 cs->mult >>= 1;
692 cs->shift--;
693 cs->maxadj = clocksource_max_adjustment(cs);
694 }
695
664 cs->max_idle_ns = clocksource_max_deferment(cs); 696 cs->max_idle_ns = clocksource_max_deferment(cs);
665} 697}
666EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale); 698EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
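
The loop added to __clocksource_updatefreq_scale() keeps halving mult (and decrementing shift) until an 11% NTP adjustment can no longer wrap the 32-bit multiplier. A hedged toy version of just that guard, outside the kernel; the starting mult/shift pair is made up.

#include <stdio.h>
#include <stdint.h>

static uint32_t max_adjustment(uint32_t mult)
{
    return (uint32_t)(((uint64_t)mult * 11) / 100);
}

/* Reduce mult/shift together until an 11% adjustment can neither
 * overflow nor underflow a 32-bit multiplier. */
static void clamp_mult(uint32_t *mult, uint32_t *shift)
{
    uint32_t maxadj = max_adjustment(*mult);

    while ((*mult + maxadj < *mult) || (*mult - maxadj > *mult)) {
        *mult >>= 1;
        (*shift)--;
        maxadj = max_adjustment(*mult);
    }
}

int main(void)
{
    uint32_t mult = 4000000000u, shift = 24;

    clamp_mult(&mult, &shift);
    printf("mult=%u shift=%u maxadj=%u\n", mult, shift, max_adjustment(mult));
    return 0;
}
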
@@ -701,6 +733,12 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
701 */ 733 */
702int clocksource_register(struct clocksource *cs) 734int clocksource_register(struct clocksource *cs)
703{ 735{
736 /* calculate max adjustment for given mult/shift */
737 cs->maxadj = clocksource_max_adjustment(cs);
738 WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
739 "Clocksource %s might overflow on 11%% adjustment\n",
740 cs->name);
741
704 /* calculate max idle time permitted for this clocksource */ 742 /* calculate max idle time permitted for this clocksource */
705 cs->max_idle_ns = clocksource_max_deferment(cs); 743 cs->max_idle_ns = clocksource_max_deferment(cs);
706 744
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f954282d9a82..fd4a7b1625a2 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
71 (dev->features & CLOCK_EVT_FEAT_C3STOP)) 71 (dev->features & CLOCK_EVT_FEAT_C3STOP))
72 return 0; 72 return 0;
73 73
74 clockevents_exchange_device(NULL, dev); 74 clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
75 tick_broadcast_device.evtdev = dev; 75 tick_broadcast_device.evtdev = dev;
76 if (!cpumask_empty(tick_get_broadcast_mask())) 76 if (!cpumask_empty(tick_get_broadcast_mask()))
77 tick_broadcast_start_periodic(dev); 77 tick_broadcast_start_periodic(dev);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2b021b0e8507..237841378c03 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
249 secs = xtime.tv_sec + wall_to_monotonic.tv_sec; 249 secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
250 nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec; 250 nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
251 nsecs += timekeeping_get_ns(); 251 nsecs += timekeeping_get_ns();
252 /* If arch requires, add in gettimeoffset() */
253 nsecs += arch_gettimeoffset();
252 254
253 } while (read_seqretry(&xtime_lock, seq)); 255 } while (read_seqretry(&xtime_lock, seq));
254 /* 256 /*
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
280 *ts = xtime; 282 *ts = xtime;
281 tomono = wall_to_monotonic; 283 tomono = wall_to_monotonic;
282 nsecs = timekeeping_get_ns(); 284 nsecs = timekeeping_get_ns();
285 /* If arch requires, add in gettimeoffset() */
286 nsecs += arch_gettimeoffset();
283 287
284 } while (read_seqretry(&xtime_lock, seq)); 288 } while (read_seqretry(&xtime_lock, seq));
285 289
@@ -802,14 +806,44 @@ static void timekeeping_adjust(s64 offset)
802 s64 error, interval = timekeeper.cycle_interval; 806 s64 error, interval = timekeeper.cycle_interval;
803 int adj; 807 int adj;
804 808
809 /*
810 * The point of this is to check if the error is greater than half
811 * an interval.
812 *
813 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
814 *
815 * Note we subtract one in the shift, so that error is really error*2.
816 * This "saves" dividing (shifting) interval twice, but keeps the
817 * (error > interval) comparison as still measuring whether error is
818 * larger than half an interval.
819 *
820 * Note: It does not "save" on aggravation when reading the code.
821 */
805 error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1); 822 error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
806 if (error > interval) { 823 if (error > interval) {
824 /*
825 * We now divide error by 4 (via shift), which checks whether
826 * the error is greater than twice the interval.
827 * If it is greater, we need a bigadjust; if it is smaller,
828 * we can adjust by 1.
829 */
807 error >>= 2; 830 error >>= 2;
831 /*
832 * XXX - In update_wall_time, we round up to the next
833 * nanosecond, and store the amount rounded up into
834 * the error. This causes the likely below to be unlikely.
835 *
836 * The proper fix is to avoid rounding up by using
837 * the high precision timekeeper.xtime_nsec instead of
838 * xtime.tv_nsec everywhere. Fixing this will take some
839 * time.
840 */
808 if (likely(error <= interval)) 841 if (likely(error <= interval))
809 adj = 1; 842 adj = 1;
810 else 843 else
811 adj = timekeeping_bigadjust(error, &interval, &offset); 844 adj = timekeeping_bigadjust(error, &interval, &offset);
812 } else if (error < -interval) { 845 } else if (error < -interval) {
846 /* See comment above, this is just switched for the negative */
813 error >>= 2; 847 error >>= 2;
814 if (likely(error >= -interval)) { 848 if (likely(error >= -interval)) {
815 adj = -1; 849 adj = -1;
@@ -817,9 +851,65 @@ static void timekeeping_adjust(s64 offset)
817 offset = -offset; 851 offset = -offset;
818 } else 852 } else
819 adj = timekeeping_bigadjust(error, &interval, &offset); 853 adj = timekeeping_bigadjust(error, &interval, &offset);
820 } else 854 } else /* No adjustment needed */
821 return; 855 return;
822 856
857 WARN_ONCE(timekeeper.clock->maxadj &&
858 (timekeeper.mult + adj > timekeeper.clock->mult +
859 timekeeper.clock->maxadj),
860 "Adjusting %s more then 11%% (%ld vs %ld)\n",
861 timekeeper.clock->name, (long)timekeeper.mult + adj,
862 (long)timekeeper.clock->mult +
863 timekeeper.clock->maxadj);
864 /*
865 * So the following can be confusing.
866 *
867 * To keep things simple, let's assume adj == 1 for now.
868 *
869 * When adj != 1, remember that the interval and offset values
870 * have been appropriately scaled so the math is the same.
871 *
872 * The basic idea here is that we're increasing the multiplier
873 * by one, this causes the xtime_interval to be incremented by
874 * one cycle_interval. This is because:
875 * xtime_interval = cycle_interval * mult
876 * So if mult is being incremented by one:
877 * xtime_interval = cycle_interval * (mult + 1)
878 * It's the same as:
879 * xtime_interval = (cycle_interval * mult) + cycle_interval
880 * Which can be shortened to:
881 * xtime_interval += cycle_interval
882 *
883 * So offset stores the non-accumulated cycles. Thus the current
884 * time (in shifted nanoseconds) is:
885 * now = (offset * adj) + xtime_nsec
886 * Now, even though we're adjusting the clock frequency, we have
887 * to keep time consistent. In other words, we can't jump back
888 * in time, and we also want to avoid jumping forward in time.
889 *
890 * So given the same offset value, we need the time to be the same
891 * both before and after the freq adjustment.
892 * now = (offset * adj_1) + xtime_nsec_1
893 * now = (offset * adj_2) + xtime_nsec_2
894 * So:
895 * (offset * adj_1) + xtime_nsec_1 =
896 * (offset * adj_2) + xtime_nsec_2
897 * And we know:
898 * adj_2 = adj_1 + 1
899 * So:
900 * (offset * adj_1) + xtime_nsec_1 =
901 * (offset * (adj_1+1)) + xtime_nsec_2
902 * (offset * adj_1) + xtime_nsec_1 =
903 * (offset * adj_1) + offset + xtime_nsec_2
904 * Canceling the sides:
905 * xtime_nsec_1 = offset + xtime_nsec_2
906 * Which gives us:
907 * xtime_nsec_2 = xtime_nsec_1 - offset
908 * Which simplifies to:
909 * xtime_nsec -= offset
910 *
911 * XXX - TODO: Doc ntp_error calculation.
912 */
823 timekeeper.mult += adj; 913 timekeeper.mult += adj;
824 timekeeper.xtime_interval += interval; 914 timekeeper.xtime_interval += interval;
825 timekeeper.xtime_nsec -= offset; 915 timekeeper.xtime_nsec -= offset;
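
The long comment above reduces to one invariant: for the not-yet-accumulated offset, (offset * mult) + xtime_nsec must read the same before and after the frequency tweak, which is why xtime_nsec is decreased by offset when mult is raised by adj. A small numeric check of that bookkeeping; the names mirror the kernel fields but the values are arbitrary.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t offset = 1000;       /* unaccumulated cycles */
    uint64_t mult = 500;          /* current multiplier */
    int64_t  xtime_nsec = 123456; /* shifted nanoseconds */

    uint64_t before = offset * mult + xtime_nsec;

    /* frequency adjustment by adj = +1, compensated exactly as in
     * timekeeping_adjust(): mult += adj; xtime_nsec -= offset; */
    mult += 1;
    xtime_nsec -= offset;

    uint64_t after = offset * mult + xtime_nsec;

    printf("before=%llu after=%llu\n",
           (unsigned long long)before, (unsigned long long)after);
    return 0;
}
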
diff --git a/kernel/timer.c b/kernel/timer.c
index dbaa62422b13..9c3c62b0c4bc 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1368,7 +1368,7 @@ SYSCALL_DEFINE0(getppid)
1368 int pid; 1368 int pid;
1369 1369
1370 rcu_read_lock(); 1370 rcu_read_lock();
1371 pid = task_tgid_vnr(current->real_parent); 1371 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
1372 rcu_read_unlock(); 1372 rcu_read_unlock();
1373 1373
1374 return pid; 1374 return pid;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 900b409543db..b1e8943fed1d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -152,7 +152,6 @@ void clear_ftrace_function(void)
152 ftrace_pid_function = ftrace_stub; 152 ftrace_pid_function = ftrace_stub;
153} 153}
154 154
155#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
156#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST 155#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
157/* 156/*
158 * For those archs that do not test ftrace_trace_stop in their 157 * For those archs that do not test ftrace_trace_stop in their
@@ -1212,7 +1211,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
1212 if (!src->count) { 1211 if (!src->count) {
1213 free_ftrace_hash_rcu(*dst); 1212 free_ftrace_hash_rcu(*dst);
1214 rcu_assign_pointer(*dst, EMPTY_HASH); 1213 rcu_assign_pointer(*dst, EMPTY_HASH);
1215 return 0; 1214 /* still need to update the function records */
1215 ret = 0;
1216 goto out;
1216 } 1217 }
1217 1218
1218 /* 1219 /*
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 581876f9f387..c212a7f934ec 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1078,7 +1078,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
1078 /* First see if we did not already create this dir */ 1078 /* First see if we did not already create this dir */
1079 list_for_each_entry(system, &event_subsystems, list) { 1079 list_for_each_entry(system, &event_subsystems, list) {
1080 if (strcmp(system->name, name) == 0) { 1080 if (strcmp(system->name, name) == 0) {
1081 __get_system(system);
1082 system->nr_events++; 1081 system->nr_events++;
1083 return system->entry; 1082 return system->entry;
1084 } 1083 }
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 816d3d074979..95dc31efd6dd 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1649,7 +1649,9 @@ static int replace_system_preds(struct event_subsystem *system,
1649 */ 1649 */
1650 err = replace_preds(call, NULL, ps, filter_string, true); 1650 err = replace_preds(call, NULL, ps, filter_string, true);
1651 if (err) 1651 if (err)
1652 goto fail; 1652 call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
1653 else
1654 call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
1653 } 1655 }
1654 1656
1655 list_for_each_entry(call, &ftrace_events, list) { 1657 list_for_each_entry(call, &ftrace_events, list) {
@@ -1658,6 +1660,9 @@ static int replace_system_preds(struct event_subsystem *system,
1658 if (strcmp(call->class->system, system->name) != 0) 1660 if (strcmp(call->class->system, system->name) != 0)
1659 continue; 1661 continue;
1660 1662
1663 if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
1664 continue;
1665
1661 filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL); 1666 filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
1662 if (!filter_item) 1667 if (!filter_item)
1663 goto fail_mem; 1668 goto fail_mem;
@@ -1686,7 +1691,7 @@ static int replace_system_preds(struct event_subsystem *system,
1686 * replace the filter for the call. 1691 * replace the filter for the call.
1687 */ 1692 */
1688 filter = call->filter; 1693 filter = call->filter;
1689 call->filter = filter_item->filter; 1694 rcu_assign_pointer(call->filter, filter_item->filter);
1690 filter_item->filter = filter; 1695 filter_item->filter = filter;
1691 1696
1692 fail = false; 1697 fail = false;
@@ -1741,7 +1746,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1741 filter = call->filter; 1746 filter = call->filter;
1742 if (!filter) 1747 if (!filter)
1743 goto out_unlock; 1748 goto out_unlock;
1744 call->filter = NULL; 1749 RCU_INIT_POINTER(call->filter, NULL);
1745 /* Make sure the filter is not being used */ 1750 /* Make sure the filter is not being used */
1746 synchronize_sched(); 1751 synchronize_sched();
1747 __free_filter(filter); 1752 __free_filter(filter);
@@ -1782,7 +1787,7 @@ out:
1782 * string 1787 * string
1783 */ 1788 */
1784 tmp = call->filter; 1789 tmp = call->filter;
1785 call->filter = filter; 1790 rcu_assign_pointer(call->filter, filter);
1786 if (tmp) { 1791 if (tmp) {
1787 /* Make sure the call is done with the filter */ 1792 /* Make sure the call is done with the filter */
1788 synchronize_sched(); 1793 synchronize_sched();
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 74c6c7fce749..fea790a2b176 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -245,7 +245,7 @@ static void put_hash_bucket(struct hash_bucket *bucket,
245 245
246static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b) 246static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
247{ 247{
248 return ((a->dev_addr == a->dev_addr) && 248 return ((a->dev_addr == b->dev_addr) &&
249 (a->dev == b->dev)) ? true : false; 249 (a->dev == b->dev)) ? true : false;
250} 250}
251 251
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index a0860640378d..71034f41a2ba 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -724,6 +724,14 @@ void bdi_destroy(struct backing_dev_info *bdi)
724 724
725 bdi_unregister(bdi); 725 bdi_unregister(bdi);
726 726
727 /*
728 * If bdi_unregister() had already been called earlier, the
729 * wakeup_timer could still be armed because bdi_prune_sb()
730 * can race with the bdi_wakeup_thread_delayed() calls from
731 * __mark_inode_dirty().
732 */
733 del_timer_sync(&bdi->wb.wakeup_timer);
734
727 for (i = 0; i < NR_BDI_STAT_ITEMS; i++) 735 for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
728 percpu_counter_destroy(&bdi->bdi_stat[i]); 736 percpu_counter_destroy(&bdi->bdi_stat[i]);
729 737
diff --git a/mm/filemap.c b/mm/filemap.c
index c0018f2d50e0..c106d3b3cc64 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2407,7 +2407,6 @@ static ssize_t generic_perform_write(struct file *file,
2407 iov_iter_count(i)); 2407 iov_iter_count(i));
2408 2408
2409again: 2409again:
2410
2411 /* 2410 /*
2412 * Bring in the user page that we will copy from _first_. 2411 * Bring in the user page that we will copy from _first_.
2413 * Otherwise there's a nasty deadlock on copying from the 2412 * Otherwise there's a nasty deadlock on copying from the
@@ -2463,7 +2462,10 @@ again:
2463 written += copied; 2462 written += copied;
2464 2463
2465 balance_dirty_pages_ratelimited(mapping); 2464 balance_dirty_pages_ratelimited(mapping);
2466 2465 if (fatal_signal_pending(current)) {
2466 status = -EINTR;
2467 break;
2468 }
2467 } while (iov_iter_count(i)); 2469 } while (iov_iter_count(i));
2468 2470
2469 return written ? written : status; 2471 return written ? written : status;
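
The filemap change above lets a fatally-signalled writer drop out of a long buffered write and still report the bytes already written, falling back to -EINTR only if nothing was copied. The same partial-result-or-EINTR pattern sketched in user space; the fatal_signal flag is a stand-in for fatal_signal_pending(current).

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int fatal_signal = 0;   /* stand-in for fatal_signal_pending(current) */

/* Copy src in small chunks; on a fatal signal return what was written so
 * far, or -EINTR if nothing was. */
static long chunked_write(char *dst, const char *src, size_t len)
{
    size_t written = 0;
    long status = 0;

    while (written < len) {
        size_t chunk = len - written > 16 ? 16 : len - written;

        memcpy(dst + written, src + written, chunk);
        written += chunk;

        if (fatal_signal) {
            status = -EINTR;
            break;
        }
    }
    return written ? (long)written : status;
}

int main(void)
{
    char buf[64];
    long ret = chunked_write(buf, "hello, buffered writeback world!", 32);

    printf("ret = %ld\n", ret);
    return 0;
}
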
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4298abaae153..36b3d988b4ef 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2259,12 +2259,8 @@ static void khugepaged_do_scan(struct page **hpage)
2259 2259
2260static void khugepaged_alloc_sleep(void) 2260static void khugepaged_alloc_sleep(void)
2261{ 2261{
2262 DEFINE_WAIT(wait); 2262 wait_event_freezable_timeout(khugepaged_wait, false,
2263 add_wait_queue(&khugepaged_wait, &wait); 2263 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2264 schedule_timeout_interruptible(
2265 msecs_to_jiffies(
2266 khugepaged_alloc_sleep_millisecs));
2267 remove_wait_queue(&khugepaged_wait, &wait);
2268} 2264}
2269 2265
2270#ifndef CONFIG_NUMA 2266#ifndef CONFIG_NUMA
@@ -2313,14 +2309,10 @@ static void khugepaged_loop(void)
2313 if (unlikely(kthread_should_stop())) 2309 if (unlikely(kthread_should_stop()))
2314 break; 2310 break;
2315 if (khugepaged_has_work()) { 2311 if (khugepaged_has_work()) {
2316 DEFINE_WAIT(wait);
2317 if (!khugepaged_scan_sleep_millisecs) 2312 if (!khugepaged_scan_sleep_millisecs)
2318 continue; 2313 continue;
2319 add_wait_queue(&khugepaged_wait, &wait); 2314 wait_event_freezable_timeout(khugepaged_wait, false,
2320 schedule_timeout_interruptible( 2315 msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2321 msecs_to_jiffies(
2322 khugepaged_scan_sleep_millisecs));
2323 remove_wait_queue(&khugepaged_wait, &wait);
2324 } else if (khugepaged_enabled()) 2316 } else if (khugepaged_enabled())
2325 wait_event_freezable(khugepaged_wait, 2317 wait_event_freezable(khugepaged_wait,
2326 khugepaged_wait_event()); 2318 khugepaged_wait_event());
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dae27ba3be2c..73f17c0293c0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -576,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
576 __SetPageHead(page); 576 __SetPageHead(page);
577 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { 577 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
578 __SetPageTail(p); 578 __SetPageTail(p);
579 set_page_count(p, 0);
579 p->first_page = page; 580 p->first_page = page;
580 } 581 }
581} 582}
@@ -2422,6 +2423,8 @@ retry_avoidcopy:
2422 * anon_vma prepared. 2423 * anon_vma prepared.
2423 */ 2424 */
2424 if (unlikely(anon_vma_prepare(vma))) { 2425 if (unlikely(anon_vma_prepare(vma))) {
2426 page_cache_release(new_page);
2427 page_cache_release(old_page);
2425 /* Caller expects lock to be held */ 2428 /* Caller expects lock to be held */
2426 spin_lock(&mm->page_table_lock); 2429 spin_lock(&mm->page_table_lock);
2427 return VM_FAULT_OOM; 2430 return VM_FAULT_OOM;
diff --git a/mm/migrate.c b/mm/migrate.c
index 578e29174fa6..177aca424a06 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -871,9 +871,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
871 871
872 if (anon_vma) 872 if (anon_vma)
873 put_anon_vma(anon_vma); 873 put_anon_vma(anon_vma);
874out:
875 unlock_page(hpage); 874 unlock_page(hpage);
876 875
876out:
877 if (rc != -EAGAIN) { 877 if (rc != -EAGAIN) {
878 list_del(&hpage->lru); 878 list_del(&hpage->lru);
879 put_page(hpage); 879 put_page(hpage);
diff --git a/mm/nommu.c b/mm/nommu.c
index 73419c55eda6..b982290fd962 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -454,7 +454,7 @@ void __attribute__((weak)) vmalloc_sync_all(void)
454 * between processes, it syncs the pagetable across all 454 * between processes, it syncs the pagetable across all
455 * processes. 455 * processes.
456 */ 456 */
457struct vm_struct *alloc_vm_area(size_t size) 457struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
458{ 458{
459 BUG(); 459 BUG();
460 return NULL; 460 return NULL;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 471dedb463ab..76f2c5ae908e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -185,6 +185,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
185 if (!p) 185 if (!p)
186 return 0; 186 return 0;
187 187
188 if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
189 task_unlock(p);
190 return 0;
191 }
192
188 /* 193 /*
189 * The memory controller may have a limit of 0 bytes, so avoid a divide 194 * The memory controller may have a limit of 0 bytes, so avoid a divide
190 * by zero, if necessary. 195 * by zero, if necessary.
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a3278f005230..50f08241f981 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -128,7 +128,6 @@ unsigned long global_dirty_limit;
128 * 128 *
129 */ 129 */
130static struct prop_descriptor vm_completions; 130static struct prop_descriptor vm_completions;
131static struct prop_descriptor vm_dirties;
132 131
133/* 132/*
134 * couple the period to the dirty_ratio: 133 * couple the period to the dirty_ratio:
@@ -154,7 +153,6 @@ static void update_completion_period(void)
154{ 153{
155 int shift = calc_period_shift(); 154 int shift = calc_period_shift();
156 prop_change_shift(&vm_completions, shift); 155 prop_change_shift(&vm_completions, shift);
157 prop_change_shift(&vm_dirties, shift);
158 156
159 writeback_set_ratelimit(); 157 writeback_set_ratelimit();
160} 158}
@@ -235,11 +233,6 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
235} 233}
236EXPORT_SYMBOL_GPL(bdi_writeout_inc); 234EXPORT_SYMBOL_GPL(bdi_writeout_inc);
237 235
238void task_dirty_inc(struct task_struct *tsk)
239{
240 prop_inc_single(&vm_dirties, &tsk->dirties);
241}
242
243/* 236/*
244 * Obtain an accurate fraction of the BDI's portion. 237 * Obtain an accurate fraction of the BDI's portion.
245 */ 238 */
@@ -418,8 +411,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
418 * 411 *
419 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of 412 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
420 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages. 413 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
421 * And the "limit" in the name is not seriously taken as hard limit in 414 *
422 * balance_dirty_pages(). 415 * Note that balance_dirty_pages() will only seriously take it as a hard limit
416 * when sleeping max_pause per page is not enough to keep the dirty pages under
417 * control. For example, when the device is completely stalled due to some error
418 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
419 * In other, normal situations it acts more gently, throttling the tasks
420 * harder (rather than blocking them completely) when the bdi dirty pages go high.
423 * 421 *
424 * It allocates high/low dirty limits to fast/slow devices, in order to prevent 422 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
425 * - starving fast devices 423 * - starving fast devices
@@ -601,6 +599,13 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
601 */ 599 */
602 if (unlikely(bdi_thresh > thresh)) 600 if (unlikely(bdi_thresh > thresh))
603 bdi_thresh = thresh; 601 bdi_thresh = thresh;
602 /*
603 * It's very possible that bdi_thresh is close to 0 not because the
604 * device is slow, but because it has remained inactive for a long time.
605 * Grant such devices a reasonably good (hopefully IO efficient)
606 * threshold, so that occasional writes won't be blocked and active
607 * writes can ramp up the threshold quickly.
608 */
604 bdi_thresh = max(bdi_thresh, (limit - dirty) / 8); 609 bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
605 /* 610 /*
606 * scale global setpoint to bdi's: 611 * scale global setpoint to bdi's:
@@ -984,8 +989,7 @@ static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
984 * 989 *
985 * 8 serves as the safety ratio. 990 * 8 serves as the safety ratio.
986 */ 991 */
987 if (bdi_dirty) 992 t = min(t, bdi_dirty * HZ / (8 * bw + 1));
988 t = min(t, bdi_dirty * HZ / (8 * bw + 1));
989 993
990 /* 994 /*
991 * The pause time will be settled within range (max_pause/4, max_pause). 995 * The pause time will be settled within range (max_pause/4, max_pause).
@@ -1133,17 +1137,30 @@ pause:
1133 pages_dirtied, 1137 pages_dirtied,
1134 pause, 1138 pause,
1135 start_time); 1139 start_time);
1136 __set_current_state(TASK_UNINTERRUPTIBLE); 1140 __set_current_state(TASK_KILLABLE);
1137 io_schedule_timeout(pause); 1141 io_schedule_timeout(pause);
1138 1142
1139 dirty_thresh = hard_dirty_limit(dirty_thresh);
1140 /* 1143 /*
1141 * max-pause area. If dirty exceeded but still within this 1144 * This is typically equal to (nr_dirty < dirty_thresh) and can
1142 * area, no need to sleep for more than 200ms: (a) 8 pages per 1145 * also keep "1000+ dd on a slow USB stick" under control.
1143 * 200ms is typically more than enough to curb heavy dirtiers;
1144 * (b) the pause time limit makes the dirtiers more responsive.
1145 */ 1146 */
1146 if (nr_dirty < dirty_thresh) 1147 if (task_ratelimit)
1148 break;
1149
1150 /*
1151 * In the case of an unresponsive NFS server where the NFS dirty
1152 * pages exceed dirty_thresh, give the other good bdi's a pipe
1153 * to go through, so that tasks on them still remain responsive.
1154 *
1155 * In theory 1 page is enough to keep the consumer-producer
1156 * pipe going: the flusher cleans 1 page => the task dirties 1
1157 * more page. However bdi_dirty has accounting errors. So use
1158 * the larger and more IO friendly bdi_stat_error.
1159 */
1160 if (bdi_dirty <= bdi_stat_error(bdi))
1161 break;
1162
1163 if (fatal_signal_pending(current))
1147 break; 1164 break;
1148 } 1165 }
1149 1166
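
With the max-pause heuristic gone, the loop above now exits when the task's ratelimit is non-zero (roughly nr_dirty < dirty_thresh), when the bdi's own dirty count is within its accounting error, or on a fatal signal. A compressed model of just those exit conditions; the struct and all values are invented, not the kernel's calculations.

#include <stdio.h>

struct throttle_state {
    long nr_dirty, dirty_thresh;
    long bdi_dirty, bdi_stat_error;
    long task_ratelimit;
    int  fatal_signal;
};

/* After one simulated pause, decide whether to keep throttling the task. */
static int keep_throttling(const struct throttle_state *s)
{
    if (s->task_ratelimit)                 /* usually nr_dirty < dirty_thresh */
        return 0;
    if (s->bdi_dirty <= s->bdi_stat_error) /* let clean bdis make progress */
        return 0;
    if (s->fatal_signal)
        return 0;
    return 1;
}

int main(void)
{
    struct throttle_state s = {
        .nr_dirty = 900, .dirty_thresh = 1000,
        .bdi_dirty = 50, .bdi_stat_error = 64,
        .task_ratelimit = 0, .fatal_signal = 0,
    };

    printf("keep throttling: %d\n", keep_throttling(&s)); /* 0: bdi_dirty small */
    return 0;
}
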
@@ -1395,7 +1412,6 @@ void __init page_writeback_init(void)
1395 1412
1396 shift = calc_period_shift(); 1413 shift = calc_period_shift();
1397 prop_descriptor_init(&vm_completions, shift); 1414 prop_descriptor_init(&vm_completions, shift);
1398 prop_descriptor_init(&vm_dirties, shift);
1399} 1415}
1400 1416
1401/** 1417/**
@@ -1724,7 +1740,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
1724 __inc_zone_page_state(page, NR_DIRTIED); 1740 __inc_zone_page_state(page, NR_DIRTIED);
1725 __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); 1741 __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
1726 __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); 1742 __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
1727 task_dirty_inc(current);
1728 task_io_account_write(PAGE_CACHE_SIZE); 1743 task_io_account_write(PAGE_CACHE_SIZE);
1729 } 1744 }
1730} 1745}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9dd443d89d8b..2b8ba3aebf6e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -356,8 +356,8 @@ void prep_compound_page(struct page *page, unsigned long order)
356 __SetPageHead(page); 356 __SetPageHead(page);
357 for (i = 1; i < nr_pages; i++) { 357 for (i = 1; i < nr_pages; i++) {
358 struct page *p = page + i; 358 struct page *p = page + i;
359
360 __SetPageTail(p); 359 __SetPageTail(p);
360 set_page_count(p, 0);
361 p->first_page = page; 361 p->first_page = page;
362 } 362 }
363} 363}
@@ -3377,9 +3377,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
3377 unsigned long block_migratetype; 3377 unsigned long block_migratetype;
3378 int reserve; 3378 int reserve;
3379 3379
3380 /* Get the start pfn, end pfn and the number of blocks to reserve */ 3380 /*
3381 * Get the start pfn, end pfn and the number of blocks to reserve
3382 * We have to be careful to be aligned to pageblock_nr_pages to
3383 * make sure that we always check pfn_valid for the first page in
3384 * the block.
3385 */
3381 start_pfn = zone->zone_start_pfn; 3386 start_pfn = zone->zone_start_pfn;
3382 end_pfn = start_pfn + zone->spanned_pages; 3387 end_pfn = start_pfn + zone->spanned_pages;
3388 start_pfn = roundup(start_pfn, pageblock_nr_pages);
3383 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >> 3389 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3384 pageblock_order; 3390 pageblock_order;
3385 3391
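
The fix above rounds start_pfn up to a pageblock boundary so the reserve walk only ever tests pfn_valid() on the first page of a block. The same roundup in isolation; PAGEBLOCK_NR_PAGES and the sample pfn are assumptions made for the example.

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL

/* round x up to the next multiple of y, like the kernel's roundup() */
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
    unsigned long zone_start_pfn = 0x12345;   /* not block aligned */
    unsigned long start_pfn = ROUNDUP(zone_start_pfn, PAGEBLOCK_NR_PAGES);

    printf("zone start %#lx -> first full pageblock at %#lx\n",
           zone_start_pfn, start_pfn);
    return 0;
}
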
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index ea534960a04b..12a48a88c0d8 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -50,14 +50,13 @@ static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
50 50
51 if (!pages || !bitmap) { 51 if (!pages || !bitmap) {
52 if (may_alloc && !pages) 52 if (may_alloc && !pages)
53 pages = pcpu_mem_alloc(pages_size); 53 pages = pcpu_mem_zalloc(pages_size);
54 if (may_alloc && !bitmap) 54 if (may_alloc && !bitmap)
55 bitmap = pcpu_mem_alloc(bitmap_size); 55 bitmap = pcpu_mem_zalloc(bitmap_size);
56 if (!pages || !bitmap) 56 if (!pages || !bitmap)
57 return NULL; 57 return NULL;
58 } 58 }
59 59
60 memset(pages, 0, pages_size);
61 bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages); 60 bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
62 61
63 *bitmapp = bitmap; 62 *bitmapp = bitmap;
@@ -143,8 +142,8 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
143 int page_start, int page_end) 142 int page_start, int page_end)
144{ 143{
145 flush_cache_vunmap( 144 flush_cache_vunmap(
146 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 145 pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
147 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 146 pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
148} 147}
149 148
150static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) 149static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
@@ -206,8 +205,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
206 int page_start, int page_end) 205 int page_start, int page_end)
207{ 206{
208 flush_tlb_kernel_range( 207 flush_tlb_kernel_range(
209 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 208 pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
210 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 209 pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
211} 210}
212 211
213static int __pcpu_map_pages(unsigned long addr, struct page **pages, 212static int __pcpu_map_pages(unsigned long addr, struct page **pages,
@@ -284,8 +283,8 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
284 int page_start, int page_end) 283 int page_start, int page_end)
285{ 284{
286 flush_cache_vmap( 285 flush_cache_vmap(
287 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 286 pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
288 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 287 pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
289} 288}
290 289
291/** 290/**
diff --git a/mm/percpu.c b/mm/percpu.c
index bf80e55dbed7..3bb810a72006 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -116,9 +116,9 @@ static int pcpu_atom_size __read_mostly;
116static int pcpu_nr_slots __read_mostly; 116static int pcpu_nr_slots __read_mostly;
117static size_t pcpu_chunk_struct_size __read_mostly; 117static size_t pcpu_chunk_struct_size __read_mostly;
118 118
119/* cpus with the lowest and highest unit numbers */ 119/* cpus with the lowest and highest unit addresses */
120static unsigned int pcpu_first_unit_cpu __read_mostly; 120static unsigned int pcpu_low_unit_cpu __read_mostly;
121static unsigned int pcpu_last_unit_cpu __read_mostly; 121static unsigned int pcpu_high_unit_cpu __read_mostly;
122 122
123/* the address of the first chunk which starts with the kernel static area */ 123/* the address of the first chunk which starts with the kernel static area */
124void *pcpu_base_addr __read_mostly; 124void *pcpu_base_addr __read_mostly;
@@ -273,11 +273,11 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
273 (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end))) 273 (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
274 274
275/** 275/**
276 * pcpu_mem_alloc - allocate memory 276 * pcpu_mem_zalloc - allocate memory
277 * @size: bytes to allocate 277 * @size: bytes to allocate
278 * 278 *
279 * Allocate @size bytes. If @size is smaller than PAGE_SIZE, 279 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
280 * kzalloc() is used; otherwise, vmalloc() is used. The returned 280 * kzalloc() is used; otherwise, vzalloc() is used. The returned
281 * memory is always zeroed. 281 * memory is always zeroed.
282 * 282 *
283 * CONTEXT: 283 * CONTEXT:
@@ -286,7 +286,7 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
286 * RETURNS: 286 * RETURNS:
287 * Pointer to the allocated area on success, NULL on failure. 287 * Pointer to the allocated area on success, NULL on failure.
288 */ 288 */
289static void *pcpu_mem_alloc(size_t size) 289static void *pcpu_mem_zalloc(size_t size)
290{ 290{
291 if (WARN_ON_ONCE(!slab_is_available())) 291 if (WARN_ON_ONCE(!slab_is_available()))
292 return NULL; 292 return NULL;
@@ -302,7 +302,7 @@ static void *pcpu_mem_alloc(size_t size)
302 * @ptr: memory to free 302 * @ptr: memory to free
303 * @size: size of the area 303 * @size: size of the area
304 * 304 *
305 * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc(). 305 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
306 */ 306 */
307static void pcpu_mem_free(void *ptr, size_t size) 307static void pcpu_mem_free(void *ptr, size_t size)
308{ 308{
@@ -384,7 +384,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
384 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); 384 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
385 unsigned long flags; 385 unsigned long flags;
386 386
387 new = pcpu_mem_alloc(new_size); 387 new = pcpu_mem_zalloc(new_size);
388 if (!new) 388 if (!new)
389 return -ENOMEM; 389 return -ENOMEM;
390 390
@@ -604,11 +604,12 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
604{ 604{
605 struct pcpu_chunk *chunk; 605 struct pcpu_chunk *chunk;
606 606
607 chunk = pcpu_mem_alloc(pcpu_chunk_struct_size); 607 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
608 if (!chunk) 608 if (!chunk)
609 return NULL; 609 return NULL;
610 610
611 chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); 611 chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
612 sizeof(chunk->map[0]));
612 if (!chunk->map) { 613 if (!chunk->map) {
613 kfree(chunk); 614 kfree(chunk);
614 return NULL; 615 return NULL;
@@ -977,6 +978,17 @@ bool is_kernel_percpu_address(unsigned long addr)
977 * address. The caller is responsible for ensuring @addr stays valid 978 * address. The caller is responsible for ensuring @addr stays valid
978 * until this function finishes. 979 * until this function finishes.
979 * 980 *
981 * percpu allocator has special setup for the first chunk, which currently
982 * supports either embedding in linear address space or vmalloc mapping,
983 * and, from the second one, the backing allocator (currently either vm or
984 * km) provides translation.
985 *
986 * The addr could be translated simply without checking whether it falls into
987 * the first chunk. But the current code better reflects how the percpu
988 * allocator actually works, and the verification can catch bugs both in the
989 * percpu allocator itself and in per_cpu_ptr_to_phys() callers. So we keep
990 * the current code.
991 *
980 * RETURNS: 992 * RETURNS:
981 * The physical address for @addr. 993 * The physical address for @addr.
982 */ 994 */
@@ -984,19 +996,19 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
984{ 996{
985 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 997 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
986 bool in_first_chunk = false; 998 bool in_first_chunk = false;
987 unsigned long first_start, first_end; 999 unsigned long first_low, first_high;
988 unsigned int cpu; 1000 unsigned int cpu;
989 1001
990 /* 1002 /*
991 * The following test on first_start/end isn't strictly 1003 * The following test on unit_low/high isn't strictly
992 * necessary but will speed up lookups of addresses which 1004 * necessary but will speed up lookups of addresses which
993 * aren't in the first chunk. 1005 * aren't in the first chunk.
994 */ 1006 */
995 first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0); 1007 first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
996 first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu, 1008 first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
997 pcpu_unit_pages); 1009 pcpu_unit_pages);
998 if ((unsigned long)addr >= first_start && 1010 if ((unsigned long)addr >= first_low &&
999 (unsigned long)addr < first_end) { 1011 (unsigned long)addr < first_high) {
1000 for_each_possible_cpu(cpu) { 1012 for_each_possible_cpu(cpu) {
1001 void *start = per_cpu_ptr(base, cpu); 1013 void *start = per_cpu_ptr(base, cpu);
1002 1014
@@ -1233,7 +1245,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1233 1245
1234 for (cpu = 0; cpu < nr_cpu_ids; cpu++) 1246 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1235 unit_map[cpu] = UINT_MAX; 1247 unit_map[cpu] = UINT_MAX;
1236 pcpu_first_unit_cpu = NR_CPUS; 1248
1249 pcpu_low_unit_cpu = NR_CPUS;
1250 pcpu_high_unit_cpu = NR_CPUS;
1237 1251
1238 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 1252 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1239 const struct pcpu_group_info *gi = &ai->groups[group]; 1253 const struct pcpu_group_info *gi = &ai->groups[group];
@@ -1253,9 +1267,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1253 unit_map[cpu] = unit + i; 1267 unit_map[cpu] = unit + i;
1254 unit_off[cpu] = gi->base_offset + i * ai->unit_size; 1268 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1255 1269
1256 if (pcpu_first_unit_cpu == NR_CPUS) 1270 /* determine low/high unit_cpu */
1257 pcpu_first_unit_cpu = cpu; 1271 if (pcpu_low_unit_cpu == NR_CPUS ||
1258 pcpu_last_unit_cpu = cpu; 1272 unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
1273 pcpu_low_unit_cpu = cpu;
1274 if (pcpu_high_unit_cpu == NR_CPUS ||
1275 unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
1276 pcpu_high_unit_cpu = cpu;
1259 } 1277 }
1260 } 1278 }
1261 pcpu_nr_units = unit; 1279 pcpu_nr_units = unit;
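
pcpu_low_unit_cpu and pcpu_high_unit_cpu now name the CPUs with the lowest and highest unit addresses, so the setup loop tracks an argmin/argmax over unit_off[] instead of simply remembering the first and last CPU it visited. The same selection sketched with ordinary arrays; NR_CPUS and the offsets are invented.

#include <stdio.h>

#define NR_CPUS 4

int main(void)
{
    long unit_off[NR_CPUS] = { 4096, 0, 12288, 8192 };  /* per-cpu unit offsets */
    int low = NR_CPUS, high = NR_CPUS;                  /* NR_CPUS means "unset" */

    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        if (low == NR_CPUS || unit_off[cpu] < unit_off[low])
            low = cpu;
        if (high == NR_CPUS || unit_off[cpu] > unit_off[high])
            high = cpu;
    }
    printf("low unit cpu=%d (off %ld), high unit cpu=%d (off %ld)\n",
           low, unit_off[low], high, unit_off[high]);
    return 0;
}
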
@@ -1889,7 +1907,7 @@ void __init percpu_init_late(void)
1889 1907
1890 BUILD_BUG_ON(size > PAGE_SIZE); 1908 BUILD_BUG_ON(size > PAGE_SIZE);
1891 1909
1892 map = pcpu_mem_alloc(size); 1910 map = pcpu_mem_zalloc(size);
1893 BUG_ON(!map); 1911 BUG_ON(!map);
1894 1912
1895 spin_lock_irqsave(&pcpu_lock, flags); 1913 spin_lock_irqsave(&pcpu_lock, flags);
diff --git a/mm/slab.c b/mm/slab.c
index 708efe886154..83311c9aaf9d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -595,6 +595,7 @@ static enum {
595 PARTIAL_AC, 595 PARTIAL_AC,
596 PARTIAL_L3, 596 PARTIAL_L3,
597 EARLY, 597 EARLY,
598 LATE,
598 FULL 599 FULL
599} g_cpucache_up; 600} g_cpucache_up;
600 601
@@ -671,7 +672,7 @@ static void init_node_lock_keys(int q)
671{ 672{
672 struct cache_sizes *s = malloc_sizes; 673 struct cache_sizes *s = malloc_sizes;
673 674
674 if (g_cpucache_up != FULL) 675 if (g_cpucache_up < LATE)
675 return; 676 return;
676 677
677 for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) { 678 for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1666,6 +1667,8 @@ void __init kmem_cache_init_late(void)
1666{ 1667{
1667 struct kmem_cache *cachep; 1668 struct kmem_cache *cachep;
1668 1669
1670 g_cpucache_up = LATE;
1671
1669 /* Annotate slab for lockdep -- annotate the malloc caches */ 1672 /* Annotate slab for lockdep -- annotate the malloc caches */
1670 init_lock_keys(); 1673 init_lock_keys();
1671 1674
diff --git a/mm/slub.c b/mm/slub.c
index 7d2a996c307e..ed3334d9b6da 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1862,7 +1862,7 @@ static void unfreeze_partials(struct kmem_cache *s)
1862{ 1862{
1863 struct kmem_cache_node *n = NULL; 1863 struct kmem_cache_node *n = NULL;
1864 struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab); 1864 struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
1865 struct page *page; 1865 struct page *page, *discard_page = NULL;
1866 1866
1867 while ((page = c->partial)) { 1867 while ((page = c->partial)) {
1868 enum slab_modes { M_PARTIAL, M_FREE }; 1868 enum slab_modes { M_PARTIAL, M_FREE };
@@ -1904,7 +1904,8 @@ static void unfreeze_partials(struct kmem_cache *s)
1904 if (l == M_PARTIAL) 1904 if (l == M_PARTIAL)
1905 remove_partial(n, page); 1905 remove_partial(n, page);
1906 else 1906 else
1907 add_partial(n, page, 1); 1907 add_partial(n, page,
1908 DEACTIVATE_TO_TAIL);
1908 1909
1909 l = m; 1910 l = m;
1910 } 1911 }
@@ -1915,14 +1916,22 @@ static void unfreeze_partials(struct kmem_cache *s)
1915 "unfreezing slab")); 1916 "unfreezing slab"));
1916 1917
1917 if (m == M_FREE) { 1918 if (m == M_FREE) {
1918 stat(s, DEACTIVATE_EMPTY); 1919 page->next = discard_page;
1919 discard_slab(s, page); 1920 discard_page = page;
1920 stat(s, FREE_SLAB);
1921 } 1921 }
1922 } 1922 }
1923 1923
1924 if (n) 1924 if (n)
1925 spin_unlock(&n->list_lock); 1925 spin_unlock(&n->list_lock);
1926
1927 while (discard_page) {
1928 page = discard_page;
1929 discard_page = discard_page->next;
1930
1931 stat(s, DEACTIVATE_EMPTY);
1932 discard_slab(s, page);
1933 stat(s, FREE_SLAB);
1934 }
1926} 1935}
1927 1936
1928/* 1937/*
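
The discard_page list above gathers empty slabs while n->list_lock is held and frees them only after the unlock, keeping the expensive frees out of the spinlock's critical section. A minimal user-space version of that unlink-under-the-lock, free-afterwards pattern using a pthread mutex; struct node and drain_and_free() are illustrative, not SLUB code.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct node { int id; struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *partial_list;

/* Move every node off the shared list while holding the lock,
 * then free them with the lock already dropped. */
static void drain_and_free(void)
{
    struct node *discard = NULL, *n;

    pthread_mutex_lock(&list_lock);
    while ((n = partial_list)) {
        partial_list = n->next;
        n->next = discard;      /* defer the free */
        discard = n;
    }
    pthread_mutex_unlock(&list_lock);

    while ((n = discard)) {     /* expensive work outside the lock */
        discard = n->next;
        printf("freeing node %d\n", n->id);
        free(n);
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct node *n = malloc(sizeof(*n));
        n->id = i;
        n->next = partial_list;
        partial_list = n;
    }
    drain_and_free();
    return 0;
}
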
@@ -1969,7 +1978,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
1969 page->pobjects = pobjects; 1978 page->pobjects = pobjects;
1970 page->next = oldpage; 1979 page->next = oldpage;
1971 1980
1972 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage); 1981 } while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
1973 stat(s, CPU_PARTIAL_FREE); 1982 stat(s, CPU_PARTIAL_FREE);
1974 return pobjects; 1983 return pobjects;
1975} 1984}
@@ -4435,30 +4444,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
4435 4444
4436 for_each_possible_cpu(cpu) { 4445 for_each_possible_cpu(cpu) {
4437 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 4446 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
4447 int node = ACCESS_ONCE(c->node);
4438 struct page *page; 4448 struct page *page;
4439 4449
4440 if (!c || c->node < 0) 4450 if (node < 0)
4441 continue; 4451 continue;
4442 4452 page = ACCESS_ONCE(c->page);
4443 if (c->page) { 4453 if (page) {
4444 if (flags & SO_TOTAL) 4454 if (flags & SO_TOTAL)
4445 x = c->page->objects; 4455 x = page->objects;
4446 else if (flags & SO_OBJECTS) 4456 else if (flags & SO_OBJECTS)
4447 x = c->page->inuse; 4457 x = page->inuse;
4448 else 4458 else
4449 x = 1; 4459 x = 1;
4450 4460
4451 total += x; 4461 total += x;
4452 nodes[c->node] += x; 4462 nodes[node] += x;
4453 } 4463 }
4454 page = c->partial; 4464 page = c->partial;
4455 4465
4456 if (page) { 4466 if (page) {
4457 x = page->pobjects; 4467 x = page->pobjects;
4458 total += x; 4468 total += x;
4459 nodes[c->node] += x; 4469 nodes[node] += x;
4460 } 4470 }
4461 per_cpu[c->node]++; 4471 per_cpu[node]++;
4462 } 4472 }
4463 } 4473 }
4464 4474
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b669aa6f6caf..1d8b32f07139 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1633,6 +1633,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
1633 goto fail; 1633 goto fail;
1634 1634
1635 addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller); 1635 addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
1636 if (!addr)
1637 return NULL;
1636 1638
1637 /* 1639 /*
1638 * In this function, newly allocated vm_struct is not added 1640 * In this function, newly allocated vm_struct is not added
@@ -2141,23 +2143,30 @@ void __attribute__((weak)) vmalloc_sync_all(void)
2141 2143
2142static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) 2144static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
2143{ 2145{
2144 /* apply_to_page_range() does all the hard work. */ 2146 pte_t ***p = data;
2147
2148 if (p) {
2149 *(*p) = pte;
2150 (*p)++;
2151 }
2145 return 0; 2152 return 0;
2146} 2153}
2147 2154
2148/** 2155/**
2149 * alloc_vm_area - allocate a range of kernel address space 2156 * alloc_vm_area - allocate a range of kernel address space
2150 * @size: size of the area 2157 * @size: size of the area
2158 * @ptes: returns the PTEs for the address space
2151 * 2159 *
2152 * Returns: NULL on failure, vm_struct on success 2160 * Returns: NULL on failure, vm_struct on success
2153 * 2161 *
2154 * This function reserves a range of kernel address space, and 2162 * This function reserves a range of kernel address space, and
2155 * allocates pagetables to map that range. No actual mappings 2163 * allocates pagetables to map that range. No actual mappings
2156 * are created. If the kernel address space is not shared 2164 * are created.
2157 * between processes, it syncs the pagetable across all 2165 *
2158 * processes. 2166 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
2167 * allocated for the VM area are returned.
2159 */ 2168 */
2160struct vm_struct *alloc_vm_area(size_t size) 2169struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
2161{ 2170{
2162 struct vm_struct *area; 2171 struct vm_struct *area;
2163 2172
@@ -2171,19 +2180,11 @@ struct vm_struct *alloc_vm_area(size_t size)
2171 * of kernel virtual address space and mapped into init_mm. 2180 * of kernel virtual address space and mapped into init_mm.
2172 */ 2181 */
2173 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 2182 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2174 area->size, f, NULL)) { 2183 size, f, ptes ? &ptes : NULL)) {
2175 free_vm_area(area); 2184 free_vm_area(area);
2176 return NULL; 2185 return NULL;
2177 } 2186 }
2178 2187
2179 /*
2180 * If the allocated address space is passed to a hypercall
2181 * before being used then we cannot rely on a page fault to
2182 * trigger an update of the page tables. So sync all the page
2183 * tables here.
2184 */
2185 vmalloc_sync_all();
2186
2187 return area; 2188 return area;
2188} 2189}
2189EXPORT_SYMBOL_GPL(alloc_vm_area); 2190EXPORT_SYMBOL_GPL(alloc_vm_area);
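
The callback f() above fills a caller-supplied array through a moving cursor: alloc_vm_area() passes &ptes down, and every invocation stores the current pte pointer and advances the cursor. The same triple-pointer cursor idiom in plain C; walk_range(), record_slot() and the int slots are assumptions made up for the illustration.

#include <stdio.h>

/* Visit every element of a range, handing each element's address to cb. */
static int walk_range(int *base, int n,
                      int (*cb)(int *slot, void *data), void *data)
{
    for (int i = 0; i < n; i++) {
        int err = cb(&base[i], data);
        if (err)
            return err;
    }
    return 0;
}

/* Callback: if the caller wants the slot pointers, record them via the
 * cursor and bump it, much like f() does with pte_t ***p. */
static int record_slot(int *slot, void *data)
{
    int ***p = data;

    if (p) {
        *(*p) = slot;
        (*p)++;
    }
    return 0;
}

int main(void)
{
    int pages[4] = { 0 };
    int *slots[4];
    int **cursor = slots;

    walk_range(pages, 4, record_slot, &cursor);
    printf("collected %ld slot pointers\n", (long)(cursor - slots));
    return 0;
}
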
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a1893c050795..f54a05b7a61d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
183 */ 183 */
184void register_shrinker(struct shrinker *shrinker) 184void register_shrinker(struct shrinker *shrinker)
185{ 185{
186 shrinker->nr = 0; 186 atomic_long_set(&shrinker->nr_in_batch, 0);
187 down_write(&shrinker_rwsem); 187 down_write(&shrinker_rwsem);
188 list_add_tail(&shrinker->list, &shrinker_list); 188 list_add_tail(&shrinker->list, &shrinker_list);
189 up_write(&shrinker_rwsem); 189 up_write(&shrinker_rwsem);
@@ -247,25 +247,26 @@ unsigned long shrink_slab(struct shrink_control *shrink,
247 247
248 list_for_each_entry(shrinker, &shrinker_list, list) { 248 list_for_each_entry(shrinker, &shrinker_list, list) {
249 unsigned long long delta; 249 unsigned long long delta;
250 unsigned long total_scan; 250 long total_scan;
251 unsigned long max_pass; 251 long max_pass;
252 int shrink_ret = 0; 252 int shrink_ret = 0;
253 long nr; 253 long nr;
254 long new_nr; 254 long new_nr;
255 long batch_size = shrinker->batch ? shrinker->batch 255 long batch_size = shrinker->batch ? shrinker->batch
256 : SHRINK_BATCH; 256 : SHRINK_BATCH;
257 257
258 max_pass = do_shrinker_shrink(shrinker, shrink, 0);
259 if (max_pass <= 0)
260 continue;
261
258 /* 262 /*
259 * copy the current shrinker scan count into a local variable 263 * copy the current shrinker scan count into a local variable
260 * and zero it so that other concurrent shrinker invocations 264 * and zero it so that other concurrent shrinker invocations
261 * don't also do this scanning work. 265 * don't also do this scanning work.
262 */ 266 */
263 do { 267 nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
264 nr = shrinker->nr;
265 } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
266 268
267 total_scan = nr; 269 total_scan = nr;
268 max_pass = do_shrinker_shrink(shrinker, shrink, 0);
269 delta = (4 * nr_pages_scanned) / shrinker->seeks; 270 delta = (4 * nr_pages_scanned) / shrinker->seeks;
270 delta *= max_pass; 271 delta *= max_pass;
271 do_div(delta, lru_pages + 1); 272 do_div(delta, lru_pages + 1);
@@ -325,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
325 * manner that handles concurrent updates. If we exhausted the 326 * manner that handles concurrent updates. If we exhausted the
326 * scan, there is no need to do an update. 327 * scan, there is no need to do an update.
327 */ 328 */
328 do { 329 if (total_scan > 0)
329 nr = shrinker->nr; 330 new_nr = atomic_long_add_return(total_scan,
330 new_nr = total_scan + nr; 331 &shrinker->nr_in_batch);
331 if (total_scan <= 0) 332 else
332 break; 333 new_nr = atomic_long_read(&shrinker->nr_in_batch);
333 } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
334 334
335 trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr); 335 trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
336 } 336 }
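
The shrinker bookkeeping above swaps a hand-rolled cmpxchg loop for atomic_long_xchg() to claim the deferred scan count and atomic_long_add_return() to put back whatever was not scanned. A user-space analogue with C11 atomics; the shrinker itself is mocked and the numbers are arbitrary.

#include <stdio.h>
#include <stdatomic.h>

static atomic_long nr_in_batch;    /* deferred scan work */

/* Claim all pending work so concurrent callers don't scan it too. */
static long claim_batch(void)
{
    return atomic_exchange(&nr_in_batch, 0);
}

/* Return the unscanned remainder to the counter, reporting the new total. */
static long return_batch(long leftover)
{
    if (leftover > 0)
        return atomic_fetch_add(&nr_in_batch, leftover) + leftover;
    return atomic_load(&nr_in_batch);
}

int main(void)
{
    atomic_store(&nr_in_batch, 300);

    long nr = claim_batch();           /* this caller now owns 300 units */
    long scanned = 200;
    long new_nr = return_batch(nr - scanned);

    printf("claimed=%ld new_nr=%ld\n", nr, new_nr);
    return 0;
}
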
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index c7aafc7c5ed4..5f09a578d49d 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -245,9 +245,11 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
245 if (tt_global_entry) { 245 if (tt_global_entry) {
246 /* This node is probably going to update its tt table */ 246 /* This node is probably going to update its tt table */
247 tt_global_entry->orig_node->tt_poss_change = true; 247 tt_global_entry->orig_node->tt_poss_change = true;
248 /* The global entry has to be marked as PENDING and has to be 248 /* The global entry has to be marked as ROAMING and has to be
249 * kept for consistency purposes */ 249 * kept for consistency purposes */
250 tt_global_entry->flags |= TT_CLIENT_PENDING; 250 tt_global_entry->flags |= TT_CLIENT_ROAM;
251 tt_global_entry->roam_at = jiffies;
252
251 send_roam_adv(bat_priv, tt_global_entry->addr, 253 send_roam_adv(bat_priv, tt_global_entry->addr,
252 tt_global_entry->orig_node); 254 tt_global_entry->orig_node);
253 } 255 }
@@ -694,6 +696,7 @@ void tt_global_del(struct bat_priv *bat_priv,
694 const char *message, bool roaming) 696 const char *message, bool roaming)
695{ 697{
696 struct tt_global_entry *tt_global_entry = NULL; 698 struct tt_global_entry *tt_global_entry = NULL;
699 struct tt_local_entry *tt_local_entry = NULL;
697 700
698 tt_global_entry = tt_global_hash_find(bat_priv, addr); 701 tt_global_entry = tt_global_hash_find(bat_priv, addr);
699 if (!tt_global_entry) 702 if (!tt_global_entry)
@@ -701,15 +704,29 @@ void tt_global_del(struct bat_priv *bat_priv,
701 704
702 if (tt_global_entry->orig_node == orig_node) { 705 if (tt_global_entry->orig_node == orig_node) {
703 if (roaming) { 706 if (roaming) {
704 tt_global_entry->flags |= TT_CLIENT_ROAM; 707 /* if we are deleting a global entry due to a roam
705 tt_global_entry->roam_at = jiffies; 708 * event, there are two possibilities:
706 goto out; 709 * 1) the client roamed from node A to node B => we mark
710 * it with TT_CLIENT_ROAM, we start a timer and we
711 * wait for node B to claim it. In case of timeout
712 * the entry is purged.
713 * 2) the client roamed to us => we can directly delete
714 * the global entry, since it is useless now. */
715 tt_local_entry = tt_local_hash_find(bat_priv,
716 tt_global_entry->addr);
717 if (!tt_local_entry) {
718 tt_global_entry->flags |= TT_CLIENT_ROAM;
719 tt_global_entry->roam_at = jiffies;
720 goto out;
721 }
707 } 722 }
708 _tt_global_del(bat_priv, tt_global_entry, message); 723 _tt_global_del(bat_priv, tt_global_entry, message);
709 } 724 }
710out: 725out:
711 if (tt_global_entry) 726 if (tt_global_entry)
712 tt_global_entry_free_ref(tt_global_entry); 727 tt_global_entry_free_ref(tt_global_entry);
728 if (tt_local_entry)
729 tt_local_entry_free_ref(tt_local_entry);
713} 730}
714 731
715void tt_global_del_orig(struct bat_priv *bat_priv, 732void tt_global_del_orig(struct bat_priv *bat_priv,
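
Annotation: the comment added in the second translation-table hunk spells out the two roaming cases; the code only marks the global entry TT_CLIENT_ROAM (and arms roam_at) when no matching local entry exists, otherwise it deletes the now-useless global entry. A condensed sketch of that decision; roam_or_delete is a hypothetical wrapper, the helpers and flags are the ones used in the hunk:

    /* Sketch: roaming-aware delete decision, names of the wrapper are illustrative. */
    static void roam_or_delete(struct bat_priv *bat_priv,
                               struct tt_global_entry *global)
    {
            struct tt_local_entry *local =
                    tt_local_hash_find(bat_priv, global->addr);

            if (!local) {
                    /* Case 1: client moved to another node; keep the entry,
                     * flag it and let it be purged if nobody claims it. */
                    global->flags |= TT_CLIENT_ROAM;
                    global->roam_at = jiffies;
                    return;
            }
            /* Case 2: client roamed to us; the global entry is stale. */
            _tt_global_del(bat_priv, global, "client roamed to us");
            tt_local_entry_free_ref(local);
    }
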
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 91bcd3a961ec..1eea8208b2cc 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -79,17 +79,12 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
79 79
80static void __bnep_link_session(struct bnep_session *s) 80static void __bnep_link_session(struct bnep_session *s)
81{ 81{
82 /* It's safe to call __module_get() here because sessions are added
83 by the socket layer which has to hold the reference to this module.
84 */
85 __module_get(THIS_MODULE);
86 list_add(&s->list, &bnep_session_list); 82 list_add(&s->list, &bnep_session_list);
87} 83}
88 84
89static void __bnep_unlink_session(struct bnep_session *s) 85static void __bnep_unlink_session(struct bnep_session *s)
90{ 86{
91 list_del(&s->list); 87 list_del(&s->list);
92 module_put(THIS_MODULE);
93} 88}
94 89
95static int bnep_send(struct bnep_session *s, void *data, size_t len) 90static int bnep_send(struct bnep_session *s, void *data, size_t len)
@@ -530,6 +525,7 @@ static int bnep_session(void *arg)
530 525
531 up_write(&bnep_session_sem); 526 up_write(&bnep_session_sem);
532 free_netdev(dev); 527 free_netdev(dev);
528 module_put_and_exit(0);
533 return 0; 529 return 0;
534} 530}
535 531
@@ -616,9 +612,11 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
616 612
617 __bnep_link_session(s); 613 __bnep_link_session(s);
618 614
615 __module_get(THIS_MODULE);
619 s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name); 616 s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
620 if (IS_ERR(s->task)) { 617 if (IS_ERR(s->task)) {
621 /* Session thread start failed, gotta cleanup. */ 618 /* Session thread start failed, gotta cleanup. */
619 module_put(THIS_MODULE);
622 unregister_netdev(dev); 620 unregister_netdev(dev);
623 __bnep_unlink_session(s); 621 __bnep_unlink_session(s);
624 err = PTR_ERR(s->task); 622 err = PTR_ERR(s->task);
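
Annotation: the bnep change moves the module reference out of the session list helpers. The caller now takes it with __module_get() just before kthread_run(), drops it with module_put() if the thread cannot be started, and the session thread releases it on exit via module_put_and_exit(). A stripped-down sketch of that ownership pattern, assuming kernel kthread/module APIs; worker_fn and start_worker are illustrative names:

    /* Sketch: the kthread owns one module reference for its lifetime. */
    static int worker_fn(void *arg)
    {
            /* ... do the session work ... */
            module_put_and_exit(0);     /* drop the ref taken by the creator */
            return 0;
    }

    static int start_worker(void *session)
    {
            struct task_struct *task;

            __module_get(THIS_MODULE);          /* ref handed to the thread */
            task = kthread_run(worker_fn, session, "kworker_example");
            if (IS_ERR(task)) {
                    module_put(THIS_MODULE);    /* thread never ran */
                    return PTR_ERR(task);
            }
            return 0;
    }

The same rearrangement is applied to cmtp in the next file of this diff.
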
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 7d00ddf9e9dc..5a6e634f7fca 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -67,14 +67,12 @@ static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
67 67
68static void __cmtp_link_session(struct cmtp_session *session) 68static void __cmtp_link_session(struct cmtp_session *session)
69{ 69{
70 __module_get(THIS_MODULE);
71 list_add(&session->list, &cmtp_session_list); 70 list_add(&session->list, &cmtp_session_list);
72} 71}
73 72
74static void __cmtp_unlink_session(struct cmtp_session *session) 73static void __cmtp_unlink_session(struct cmtp_session *session)
75{ 74{
76 list_del(&session->list); 75 list_del(&session->list);
77 module_put(THIS_MODULE);
78} 76}
79 77
80static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci) 78static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
@@ -327,6 +325,7 @@ static int cmtp_session(void *arg)
327 up_write(&cmtp_session_sem); 325 up_write(&cmtp_session_sem);
328 326
329 kfree(session); 327 kfree(session);
328 module_put_and_exit(0);
330 return 0; 329 return 0;
331} 330}
332 331
@@ -376,9 +375,11 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
376 375
377 __cmtp_link_session(session); 376 __cmtp_link_session(session);
378 377
378 __module_get(THIS_MODULE);
379 session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d", 379 session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
380 session->num); 380 session->num);
381 if (IS_ERR(session->task)) { 381 if (IS_ERR(session->task)) {
382 module_put(THIS_MODULE);
382 err = PTR_ERR(session->task); 383 err = PTR_ERR(session->task);
383 goto unlink; 384 goto unlink;
384 } 385 }
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index c1c597e3e198..e0af7237cd92 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -673,7 +673,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
673 goto encrypt; 673 goto encrypt;
674 674
675auth: 675auth:
676 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) 676 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
677 return 0; 677 return 0;
678 678
679 if (!hci_conn_auth(conn, sec_level, auth_type)) 679 if (!hci_conn_auth(conn, sec_level, auth_type))
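
Annotation: replacing test_bit() with test_and_set_bit() makes the "is encryption already pending?" check and the flag update a single atomic step, closing the window in which two paths could both see the bit clear and both start the operation. A tiny sketch of the idiom, assuming kernel bitops; the function name is illustrative:

    /* Sketch: atomically claim a one-shot "request pending" flag. */
    static bool try_start_request(unsigned long *pend_flags, int bit)
    {
            if (test_and_set_bit(bit, pend_flags))
                    return false;   /* someone else already started it */
            /* we own the pending state; issue the request here */
            return true;
    }
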
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d7d96b6b1f0d..643a41b76e2e 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -545,7 +545,7 @@ static void hci_setup(struct hci_dev *hdev)
545{ 545{
546 hci_setup_event_mask(hdev); 546 hci_setup_event_mask(hdev);
547 547
548 if (hdev->lmp_ver > 1) 548 if (hdev->hci_ver > 1)
549 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 549 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
550 550
551 if (hdev->features[6] & LMP_SIMPLE_PAIR) { 551 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 8cd12917733b..5ea94a1eecf2 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -251,7 +251,7 @@ static void l2cap_chan_timeout(unsigned long arg)
251 251
252 if (sock_owned_by_user(sk)) { 252 if (sock_owned_by_user(sk)) {
253 /* sk is owned by user. Try again later */ 253 /* sk is owned by user. Try again later */
254 __set_chan_timer(chan, HZ / 5); 254 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
255 bh_unlock_sock(sk); 255 bh_unlock_sock(sk);
256 chan_put(chan); 256 chan_put(chan);
257 return; 257 return;
@@ -2488,7 +2488,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2488 if (sock_owned_by_user(sk)) { 2488 if (sock_owned_by_user(sk)) {
2489 l2cap_state_change(chan, BT_DISCONN); 2489 l2cap_state_change(chan, BT_DISCONN);
2490 __clear_chan_timer(chan); 2490 __clear_chan_timer(chan);
2491 __set_chan_timer(chan, HZ / 5); 2491 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2492 break; 2492 break;
2493 } 2493 }
2494 2494
@@ -2661,7 +2661,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2661 2661
2662 default: 2662 default:
2663 sk->sk_err = ECONNRESET; 2663 sk->sk_err = ECONNRESET;
2664 __set_chan_timer(chan, HZ * 5); 2664 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2665 l2cap_send_disconn_req(conn, chan, ECONNRESET); 2665 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2666 goto done; 2666 goto done;
2667 } 2667 }
@@ -2718,7 +2718,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2718 if (sock_owned_by_user(sk)) { 2718 if (sock_owned_by_user(sk)) {
2719 l2cap_state_change(chan, BT_DISCONN); 2719 l2cap_state_change(chan, BT_DISCONN);
2720 __clear_chan_timer(chan); 2720 __clear_chan_timer(chan);
2721 __set_chan_timer(chan, HZ / 5); 2721 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2722 bh_unlock_sock(sk); 2722 bh_unlock_sock(sk);
2723 return 0; 2723 return 0;
2724 } 2724 }
@@ -2752,7 +2752,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2752 if (sock_owned_by_user(sk)) { 2752 if (sock_owned_by_user(sk)) {
2753 l2cap_state_change(chan,BT_DISCONN); 2753 l2cap_state_change(chan,BT_DISCONN);
2754 __clear_chan_timer(chan); 2754 __clear_chan_timer(chan);
2755 __set_chan_timer(chan, HZ / 5); 2755 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2756 bh_unlock_sock(sk); 2756 bh_unlock_sock(sk);
2757 return 0; 2757 return 0;
2758 } 2758 }
@@ -3998,7 +3998,7 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3998 if (encrypt == 0x00) { 3998 if (encrypt == 0x00) {
3999 if (chan->sec_level == BT_SECURITY_MEDIUM) { 3999 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4000 __clear_chan_timer(chan); 4000 __clear_chan_timer(chan);
4001 __set_chan_timer(chan, HZ * 5); 4001 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4002 } else if (chan->sec_level == BT_SECURITY_HIGH) 4002 } else if (chan->sec_level == BT_SECURITY_HIGH)
4003 l2cap_chan_close(chan, ECONNREFUSED); 4003 l2cap_chan_close(chan, ECONNREFUSED);
4004 } else { 4004 } else {
@@ -4066,7 +4066,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4066 L2CAP_CONN_REQ, sizeof(req), &req); 4066 L2CAP_CONN_REQ, sizeof(req), &req);
4067 } else { 4067 } else {
4068 __clear_chan_timer(chan); 4068 __clear_chan_timer(chan);
4069 __set_chan_timer(chan, HZ / 10); 4069 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4070 } 4070 }
4071 } else if (chan->state == BT_CONNECT2) { 4071 } else if (chan->state == BT_CONNECT2) {
4072 struct l2cap_conn_rsp rsp; 4072 struct l2cap_conn_rsp rsp;
@@ -4086,7 +4086,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4086 } 4086 }
4087 } else { 4087 } else {
4088 l2cap_state_change(chan, BT_DISCONN); 4088 l2cap_state_change(chan, BT_DISCONN);
4089 __set_chan_timer(chan, HZ / 10); 4089 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4090 res = L2CAP_CR_SEC_BLOCK; 4090 res = L2CAP_CR_SEC_BLOCK;
4091 stat = L2CAP_CS_NO_INFO; 4091 stat = L2CAP_CS_NO_INFO;
4092 } 4092 }
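
Annotation: the l2cap hunks replace magic HZ fractions with named timeout constants; note that both HZ / 5 and HZ / 10 collapse into the single L2CAP_DISC_TIMEOUT, and HZ * 5 becomes L2CAP_DISC_REJ_TIMEOUT or L2CAP_ENC_TIMEOUT depending on the path. The constants live in the l2cap header and are not part of this diff, so the values below are placeholders only:

    /* Sketch: placeholder values; the real constants are defined in
     * include/net/bluetooth/l2cap.h and are not shown in this diff. */
    #define EXAMPLE_DISC_TIMEOUT    msecs_to_jiffies(100)   /* was HZ / 5 or HZ / 10 */
    #define EXAMPLE_ENC_TIMEOUT     msecs_to_jiffies(5000)  /* was HZ * 5 */

    static void arm_disconnect_timer(struct l2cap_chan *chan)
    {
            __clear_chan_timer(chan);
            __set_chan_timer(chan, EXAMPLE_DISC_TIMEOUT);
    }
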
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 995cbe0ac0b2..a5f4e5769809 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1501,6 +1501,8 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1501 1501
1502 __skb_pull(skb2, offset); 1502 __skb_pull(skb2, offset);
1503 skb_reset_transport_header(skb2); 1503 skb_reset_transport_header(skb2);
1504 skb_postpull_rcsum(skb2, skb_network_header(skb2),
1505 skb_network_header_len(skb2));
1504 1506
1505 icmp6_type = icmp6_hdr(skb2)->icmp6_type; 1507 icmp6_type = icmp6_hdr(skb2)->icmp6_type;
1506 1508
@@ -1770,7 +1772,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1770 int err = 0; 1772 int err = 0;
1771 struct net_bridge_mdb_htable *mdb; 1773 struct net_bridge_mdb_htable *mdb;
1772 1774
1773 spin_lock(&br->multicast_lock); 1775 spin_lock_bh(&br->multicast_lock);
1774 if (br->multicast_disabled == !val) 1776 if (br->multicast_disabled == !val)
1775 goto unlock; 1777 goto unlock;
1776 1778
@@ -1806,7 +1808,7 @@ rollback:
1806 } 1808 }
1807 1809
1808unlock: 1810unlock:
1809 spin_unlock(&br->multicast_lock); 1811 spin_unlock_bh(&br->multicast_lock);
1810 1812
1811 return err; 1813 return err;
1812} 1814}
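
Annotation: two independent fixes in br_multicast.c. After __skb_pull() strips the IPv6 header, skb_postpull_rcsum() subtracts the pulled bytes from a CHECKSUM_COMPLETE value so later checksum verification of the ICMPv6 payload still matches; and br_multicast_toggle() switches to spin_lock_bh() because the multicast lock is also taken from softirq context. A hedged sketch of the checksum adjustment, assuming kernel skb helpers; the wrapper name is illustrative:

    /* Sketch: keep skb->csum consistent while pulling a header. */
    static void pull_network_header(struct sk_buff *skb, unsigned int offset)
    {
            __skb_pull(skb, offset);
            skb_reset_transport_header(skb);
            /* Remove the pulled network-header bytes from the running
             * CHECKSUM_COMPLETE sum (a no-op for other csum types). */
            skb_postpull_rcsum(skb, skb_network_header(skb),
                               skb_network_header_len(skb));
    }
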
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e5f9ece3c9a0..a1daf8227ed1 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -18,6 +18,7 @@
18#include <net/sock.h> 18#include <net/sock.h>
19 19
20#include "br_private.h" 20#include "br_private.h"
21#include "br_private_stp.h"
21 22
22static inline size_t br_nlmsg_size(void) 23static inline size_t br_nlmsg_size(void)
23{ 24{
@@ -188,6 +189,11 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
188 189
189 p->state = new_state; 190 p->state = new_state;
190 br_log_state(p); 191 br_log_state(p);
192
193 spin_lock_bh(&p->br->lock);
194 br_port_state_selection(p->br);
195 spin_unlock_bh(&p->br->lock);
196
191 br_ifinfo_notify(RTM_NEWLINK, p); 197 br_ifinfo_notify(RTM_NEWLINK, p);
192 198
193 return 0; 199 return 0;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index ad0a3f7cf6cc..dd147d78a588 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -399,25 +399,24 @@ void br_port_state_selection(struct net_bridge *br)
399 struct net_bridge_port *p; 399 struct net_bridge_port *p;
400 unsigned int liveports = 0; 400 unsigned int liveports = 0;
401 401
402 /* Don't change port states if userspace is handling STP */
403 if (br->stp_enabled == BR_USER_STP)
404 return;
405
406 list_for_each_entry(p, &br->port_list, list) { 402 list_for_each_entry(p, &br->port_list, list) {
407 if (p->state == BR_STATE_DISABLED) 403 if (p->state == BR_STATE_DISABLED)
408 continue; 404 continue;
409 405
410 if (p->port_no == br->root_port) { 406 /* Don't change port states if userspace is handling STP */
411 p->config_pending = 0; 407 if (br->stp_enabled != BR_USER_STP) {
412 p->topology_change_ack = 0; 408 if (p->port_no == br->root_port) {
413 br_make_forwarding(p); 409 p->config_pending = 0;
414 } else if (br_is_designated_port(p)) { 410 p->topology_change_ack = 0;
415 del_timer(&p->message_age_timer); 411 br_make_forwarding(p);
416 br_make_forwarding(p); 412 } else if (br_is_designated_port(p)) {
417 } else { 413 del_timer(&p->message_age_timer);
418 p->config_pending = 0; 414 br_make_forwarding(p);
419 p->topology_change_ack = 0; 415 } else {
420 br_make_blocking(p); 416 p->config_pending = 0;
417 p->topology_change_ack = 0;
418 br_make_blocking(p);
419 }
421 } 420 }
422 421
423 if (p->state == BR_STATE_FORWARDING) 422 if (p->state == BR_STATE_FORWARDING)
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index f39921171d0d..d3ca87bf23b7 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -136,20 +136,21 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
136 136
137static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt) 137static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
138{ 138{
139 int tmp;
140 u16 chks; 139 u16 chks;
141 u16 len; 140 u16 len;
141 __le16 data;
142
142 struct cffrml *this = container_obj(layr); 143 struct cffrml *this = container_obj(layr);
143 if (this->dofcs) { 144 if (this->dofcs) {
144 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); 145 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
145 tmp = cpu_to_le16(chks); 146 data = cpu_to_le16(chks);
146 cfpkt_add_trail(pkt, &tmp, 2); 147 cfpkt_add_trail(pkt, &data, 2);
147 } else { 148 } else {
148 cfpkt_pad_trail(pkt, 2); 149 cfpkt_pad_trail(pkt, 2);
149 } 150 }
150 len = cfpkt_getlen(pkt); 151 len = cfpkt_getlen(pkt);
151 tmp = cpu_to_le16(len); 152 data = cpu_to_le16(len);
152 cfpkt_add_head(pkt, &tmp, 2); 153 cfpkt_add_head(pkt, &data, 2);
153 cfpkt_info(pkt)->hdr_len += 2; 154 cfpkt_info(pkt)->hdr_len += 2;
154 if (cfpkt_erroneous(pkt)) { 155 if (cfpkt_erroneous(pkt)) {
155 pr_err("Packet is erroneous!\n"); 156 pr_err("Packet is erroneous!\n");
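
Annotation: cffrml_transmit() previously stored the __le16 result of cpu_to_le16() in a plain int and then appended two bytes of it to the packet; on a big-endian host those two bytes are not the ones that hold the value. Declaring the variable as __le16 makes the emitted header and trailer bytes correct on any host. A small userspace-style illustration of why the width and byte order matter (hypothetical helper, not CAIF code):

    #include <stdint.h>
    #include <string.h>

    /* Sketch: append a 16-bit value in little-endian byte order. */
    static void put_le16(uint8_t *dst, uint16_t val)
    {
            uint8_t le[2] = { val & 0xff, val >> 8 };   /* explicit LE */

            /* Copying the first 2 bytes of a host-endian int here would
             * yield 0x00 0x00 on a big-endian host for small values. */
            memcpy(dst, le, sizeof(le));
    }
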
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 42599e31dcad..3a94eae7abe9 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -477,7 +477,6 @@ int crush_do_rule(struct crush_map *map,
477 int i, j; 477 int i, j;
478 int numrep; 478 int numrep;
479 int firstn; 479 int firstn;
480 int rc = -1;
481 480
482 BUG_ON(ruleno >= map->max_rules); 481 BUG_ON(ruleno >= map->max_rules);
483 482
@@ -491,23 +490,18 @@ int crush_do_rule(struct crush_map *map,
491 * that this may or may not correspond to the specific types 490 * that this may or may not correspond to the specific types
492 * referenced by the crush rule. 491 * referenced by the crush rule.
493 */ 492 */
494 if (force >= 0) { 493 if (force >= 0 &&
495 if (force >= map->max_devices || 494 force < map->max_devices &&
496 map->device_parents[force] == 0) { 495 map->device_parents[force] != 0 &&
497 /*dprintk("CRUSH: forcefed device dne\n");*/ 496 !is_out(map, weight, force, x)) {
498 rc = -1; /* force fed device dne */ 497 while (1) {
499 goto out; 498 force_context[++force_pos] = force;
500 } 499 if (force >= 0)
501 if (!is_out(map, weight, force, x)) { 500 force = map->device_parents[force];
502 while (1) { 501 else
503 force_context[++force_pos] = force; 502 force = map->bucket_parents[-1-force];
504 if (force >= 0) 503 if (force == 0)
505 force = map->device_parents[force]; 504 break;
506 else
507 force = map->bucket_parents[-1-force];
508 if (force == 0)
509 break;
510 }
511 } 505 }
512 } 506 }
513 507
@@ -600,10 +594,7 @@ int crush_do_rule(struct crush_map *map,
600 BUG_ON(1); 594 BUG_ON(1);
601 } 595 }
602 } 596 }
603 rc = result_len; 597 return result_len;
604
605out:
606 return rc;
607} 598}
608 599
609 600
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 733e46008b89..f4f3f58f5234 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -244,7 +244,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
244 ceph_pagelist_init(req->r_trail); 244 ceph_pagelist_init(req->r_trail);
245 } 245 }
246 /* create request message; allow space for oid */ 246 /* create request message; allow space for oid */
247 msg_size += 40; 247 msg_size += MAX_OBJ_NAME_SIZE;
248 if (snapc) 248 if (snapc)
249 msg_size += sizeof(u64) * snapc->num_snaps; 249 msg_size += sizeof(u64) * snapc->num_snaps;
250 if (use_mempool) 250 if (use_mempool)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ba50a1e404c..5a13edfc9f73 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1396,7 +1396,7 @@ rollback:
1396 for_each_net(net) { 1396 for_each_net(net) {
1397 for_each_netdev(net, dev) { 1397 for_each_netdev(net, dev) {
1398 if (dev == last) 1398 if (dev == last)
1399 break; 1399 goto outroll;
1400 1400
1401 if (dev->flags & IFF_UP) { 1401 if (dev->flags & IFF_UP) {
1402 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); 1402 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
@@ -1407,6 +1407,7 @@ rollback:
1407 } 1407 }
1408 } 1408 }
1409 1409
1410outroll:
1410 raw_notifier_chain_unregister(&netdev_chain, nb); 1411 raw_notifier_chain_unregister(&netdev_chain, nb);
1411 goto unlock; 1412 goto unlock;
1412} 1413}
@@ -4282,6 +4283,12 @@ static int dev_seq_open(struct inode *inode, struct file *file)
4282 sizeof(struct dev_iter_state)); 4283 sizeof(struct dev_iter_state));
4283} 4284}
4284 4285
4286int dev_seq_open_ops(struct inode *inode, struct file *file,
4287 const struct seq_operations *ops)
4288{
4289 return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
4290}
4291
4285static const struct file_operations dev_seq_fops = { 4292static const struct file_operations dev_seq_fops = {
4286 .owner = THIS_MODULE, 4293 .owner = THIS_MODULE,
4287 .open = dev_seq_open, 4294 .open = dev_seq_open,
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 277faef9148d..febba516db62 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -696,8 +696,7 @@ static const struct seq_operations dev_mc_seq_ops = {
696 696
697static int dev_mc_seq_open(struct inode *inode, struct file *file) 697static int dev_mc_seq_open(struct inode *inode, struct file *file)
698{ 698{
699 return seq_open_net(inode, file, &dev_mc_seq_ops, 699 return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
700 sizeof(struct seq_net_private));
701} 700}
702 701
703static const struct file_operations dev_mc_seq_fops = { 702static const struct file_operations dev_mc_seq_fops = {
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 039d51e6c284..5ac07d31fbc9 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2397,7 +2397,10 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2397 struct net *net = seq_file_net(seq); 2397 struct net *net = seq_file_net(seq);
2398 struct neigh_table *tbl = state->tbl; 2398 struct neigh_table *tbl = state->tbl;
2399 2399
2400 pn = pn->next; 2400 do {
2401 pn = pn->next;
2402 } while (pn && !net_eq(pneigh_net(pn), net));
2403
2401 while (!pn) { 2404 while (!pn) {
2402 if (++state->bucket > PNEIGH_HASHMASK) 2405 if (++state->bucket > PNEIGH_HASHMASK)
2403 break; 2406 break;
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 182236b2510a..9b570a6a33c5 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -26,10 +26,11 @@
26 * but then some measure against one socket starving all other sockets 26 * but then some measure against one socket starving all other sockets
27 * would be needed. 27 * would be needed.
28 * 28 *
29 * It was 128 by default. Experiments with real servers show, that 29 * The minimum value of it is 128. Experiments with real servers show that
30 * it is absolutely not enough even at 100conn/sec. 256 cures most 30 * it is absolutely not enough even at 100conn/sec. 256 cures most
31 * of problems. This value is adjusted to 128 for very small machines 31 * of problems.
32 * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb). 32 * This value is adjusted to 128 for low memory machines,
33 * and it will increase in proportion to the memory of machine.
33 * Note : Dont forget somaxconn that may limit backlog too. 34 * Note : Dont forget somaxconn that may limit backlog too.
34 */ 35 */
35int sysctl_max_syn_backlog = 256; 36int sysctl_max_syn_backlog = 256;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 025233de25f9..925991ae6f52 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -19,6 +19,7 @@ static int __init net_secret_init(void)
19} 19}
20late_initcall(net_secret_init); 20late_initcall(net_secret_init);
21 21
22#ifdef CONFIG_INET
22static u32 seq_scale(u32 seq) 23static u32 seq_scale(u32 seq)
23{ 24{
24 /* 25 /*
@@ -33,6 +34,7 @@ static u32 seq_scale(u32 seq)
33 */ 34 */
34 return seq + (ktime_to_ns(ktime_get_real()) >> 6); 35 return seq + (ktime_to_ns(ktime_get_real()) >> 6);
35} 36}
37#endif
36 38
37#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 39#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
38__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, 40__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 18a3cebb753d..3c30ee4a5710 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2230,7 +2230,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb)
2230 * @shiftlen: shift up to this many bytes 2230 * @shiftlen: shift up to this many bytes
2231 * 2231 *
2232 * Attempts to shift up to shiftlen worth of bytes, which may be less than 2232 * Attempts to shift up to shiftlen worth of bytes, which may be less than
2233 * the length of the skb, from tgt to skb. Returns number bytes shifted. 2233 * the length of the skb, from skb to tgt. Returns number bytes shifted.
2234 * It's up to caller to free skb if everything was shifted. 2234 * It's up to caller to free skb if everything was shifted.
2235 * 2235 *
2236 * If @tgt runs out of frags, the whole operation is aborted. 2236 * If @tgt runs out of frags, the whole operation is aborted.
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 90a919afbed7..3f4e5414c8e5 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -111,6 +111,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
111 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, 111 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
112 inet->inet_sport, inet->inet_dport, sk); 112 inet->inet_sport, inet->inet_dport, sk);
113 if (IS_ERR(rt)) { 113 if (IS_ERR(rt)) {
114 err = PTR_ERR(rt);
114 rt = NULL; 115 rt = NULL;
115 goto failure; 116 goto failure;
116 } 117 }
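
Annotation: the dccp_v4_connect() fix is the usual error-path detail, capture PTR_ERR(rt) before NULL-ing the pointer and jumping to the failure label, otherwise the function returns whatever happened to be in err. A generic sketch of the IS_ERR/PTR_ERR idiom; check_route is an illustrative name:

    /* Sketch: propagate the encoded error before discarding the pointer. */
    static int check_route(struct rtable *rt)
    {
            int err = 0;

            if (IS_ERR(rt)) {
                    err = PTR_ERR(rt);      /* must happen before losing rt */
                    rt = NULL;
                    goto failure;
            }
            /* ... use rt ... */
            return 0;

    failure:
            return err;
    }
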
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index a77d16158eb6..94f4ec036669 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -112,7 +112,7 @@ static unsigned long dn_rt_deadline;
112static int dn_dst_gc(struct dst_ops *ops); 112static int dn_dst_gc(struct dst_ops *ops);
113static struct dst_entry *dn_dst_check(struct dst_entry *, __u32); 113static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
114static unsigned int dn_dst_default_advmss(const struct dst_entry *dst); 114static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
115static unsigned int dn_dst_default_mtu(const struct dst_entry *dst); 115static unsigned int dn_dst_mtu(const struct dst_entry *dst);
116static void dn_dst_destroy(struct dst_entry *); 116static void dn_dst_destroy(struct dst_entry *);
117static struct dst_entry *dn_dst_negative_advice(struct dst_entry *); 117static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
118static void dn_dst_link_failure(struct sk_buff *); 118static void dn_dst_link_failure(struct sk_buff *);
@@ -135,7 +135,7 @@ static struct dst_ops dn_dst_ops = {
135 .gc = dn_dst_gc, 135 .gc = dn_dst_gc,
136 .check = dn_dst_check, 136 .check = dn_dst_check,
137 .default_advmss = dn_dst_default_advmss, 137 .default_advmss = dn_dst_default_advmss,
138 .default_mtu = dn_dst_default_mtu, 138 .mtu = dn_dst_mtu,
139 .cow_metrics = dst_cow_metrics_generic, 139 .cow_metrics = dst_cow_metrics_generic,
140 .destroy = dn_dst_destroy, 140 .destroy = dn_dst_destroy,
141 .negative_advice = dn_dst_negative_advice, 141 .negative_advice = dn_dst_negative_advice,
@@ -825,9 +825,11 @@ static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
825 return dn_mss_from_pmtu(dst->dev, dst_mtu(dst)); 825 return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
826} 826}
827 827
828static unsigned int dn_dst_default_mtu(const struct dst_entry *dst) 828static unsigned int dn_dst_mtu(const struct dst_entry *dst)
829{ 829{
830 return dst->dev->mtu; 830 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
831
832 return mtu ? : dst->dev->mtu;
831} 833}
832 834
833static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) 835static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
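
Annotation: dn_dst_default_mtu() becomes dn_dst_mtu() and is wired to the new .mtu dst_ops hook; it now honours a per-route RTAX_MTU metric and only falls back to the device MTU when none is set (the ipv4 route hunks further down make the same switch). The `?:` form is the GNU "use the tested value if non-zero" extension that the kernel uses freely. Sketch of the shape of the new operation, assuming kernel dst helpers:

    /* Sketch of the new-style .mtu operation used above. */
    static unsigned int example_dst_mtu(const struct dst_entry *dst)
    {
            unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

            /* Prefer an explicitly configured route MTU, else device MTU. */
            return mtu ? : dst->dev->mtu;
    }
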
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
index 67f691bd4acf..d9c150cc59a9 100644
--- a/net/decnet/dn_timer.c
+++ b/net/decnet/dn_timer.c
@@ -36,16 +36,13 @@ static void dn_slow_timer(unsigned long arg);
36 36
37void dn_start_slow_timer(struct sock *sk) 37void dn_start_slow_timer(struct sock *sk)
38{ 38{
39 sk->sk_timer.expires = jiffies + SLOW_INTERVAL; 39 setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
40 sk->sk_timer.function = dn_slow_timer; 40 sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
41 sk->sk_timer.data = (unsigned long)sk;
42
43 add_timer(&sk->sk_timer);
44} 41}
45 42
46void dn_stop_slow_timer(struct sock *sk) 43void dn_stop_slow_timer(struct sock *sk)
47{ 44{
48 del_timer(&sk->sk_timer); 45 sk_stop_timer(sk, &sk->sk_timer);
49} 46}
50 47
51static void dn_slow_timer(unsigned long arg) 48static void dn_slow_timer(unsigned long arg)
@@ -53,12 +50,10 @@ static void dn_slow_timer(unsigned long arg)
53 struct sock *sk = (struct sock *)arg; 50 struct sock *sk = (struct sock *)arg;
54 struct dn_scp *scp = DN_SK(sk); 51 struct dn_scp *scp = DN_SK(sk);
55 52
56 sock_hold(sk);
57 bh_lock_sock(sk); 53 bh_lock_sock(sk);
58 54
59 if (sock_owned_by_user(sk)) { 55 if (sock_owned_by_user(sk)) {
60 sk->sk_timer.expires = jiffies + HZ / 10; 56 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
61 add_timer(&sk->sk_timer);
62 goto out; 57 goto out;
63 } 58 }
64 59
@@ -100,9 +95,7 @@ static void dn_slow_timer(unsigned long arg)
100 scp->keepalive_fxn(sk); 95 scp->keepalive_fxn(sk);
101 } 96 }
102 97
103 sk->sk_timer.expires = jiffies + SLOW_INTERVAL; 98 sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
104
105 add_timer(&sk->sk_timer);
106out: 99out:
107 bh_unlock_sock(sk); 100 bh_unlock_sock(sk);
108 sock_put(sk); 101 sock_put(sk);
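
Annotation: the DECnet timer conversion swaps open-coded expires/function/data assignments plus add_timer()/del_timer() for setup_timer(), sk_reset_timer() and sk_stop_timer(); the sk_* helpers also manage the socket reference for the armed timer, which is why the explicit sock_hold() disappears from dn_slow_timer() while the final sock_put() stays. A sketch of the pattern, assuming the timer API of this kernel generation (callbacks still take an unsigned long argument); the function names are illustrative:

    /* Sketch: per-socket timer using the sk_* helpers. */
    static void example_sk_timer(unsigned long arg)
    {
            struct sock *sk = (struct sock *)arg;

            bh_lock_sock(sk);
            /* ... periodic work; re-arm for the next interval ... */
            sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
            bh_unlock_sock(sk);
            sock_put(sk);   /* drop the ref the expired timer was holding */
    }

    static void example_start(struct sock *sk)
    {
            setup_timer(&sk->sk_timer, example_sk_timer, (unsigned long)sk);
            sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
    }
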
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index c1f4154552fc..36d14406261e 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -136,8 +136,6 @@ static void ah_output_done(struct crypto_async_request *base, int err)
136 memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); 136 memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
137 } 137 }
138 138
139 err = ah->nexthdr;
140
141 kfree(AH_SKB_CB(skb)->tmp); 139 kfree(AH_SKB_CB(skb)->tmp);
142 xfrm_output_resume(skb, err); 140 xfrm_output_resume(skb, err);
143} 141}
@@ -264,12 +262,12 @@ static void ah_input_done(struct crypto_async_request *base, int err)
264 if (err) 262 if (err)
265 goto out; 263 goto out;
266 264
265 err = ah->nexthdr;
266
267 skb->network_header += ah_hlen; 267 skb->network_header += ah_hlen;
268 memcpy(skb_network_header(skb), work_iph, ihl); 268 memcpy(skb_network_header(skb), work_iph, ihl);
269 __skb_pull(skb, ah_hlen + ihl); 269 __skb_pull(skb, ah_hlen + ihl);
270 skb_set_transport_header(skb, -ihl); 270 skb_set_transport_header(skb, -ihl);
271
272 err = ah->nexthdr;
273out: 271out:
274 kfree(AH_SKB_CB(skb)->tmp); 272 kfree(AH_SKB_CB(skb)->tmp);
275 xfrm_input_resume(skb, err); 273 xfrm_input_resume(skb, err);
@@ -371,8 +369,6 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
371 if (err == -EINPROGRESS) 369 if (err == -EINPROGRESS)
372 goto out; 370 goto out;
373 371
374 if (err == -EBUSY)
375 err = NET_XMIT_DROP;
376 goto out_free; 372 goto out_free;
377 } 373 }
378 374
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c6b5092f29a1..65f01dc47565 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
1490 void __user *buffer, 1490 void __user *buffer,
1491 size_t *lenp, loff_t *ppos) 1491 size_t *lenp, loff_t *ppos)
1492{ 1492{
1493 int old_value = *(int *)ctl->data;
1493 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 1494 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
1495 int new_value = *(int *)ctl->data;
1494 1496
1495 if (write) { 1497 if (write) {
1496 struct ipv4_devconf *cnf = ctl->extra1; 1498 struct ipv4_devconf *cnf = ctl->extra1;
@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
1501 1503
1502 if (cnf == net->ipv4.devconf_dflt) 1504 if (cnf == net->ipv4.devconf_dflt)
1503 devinet_copy_dflt_conf(net, i); 1505 devinet_copy_dflt_conf(net, i);
1506 if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
1507 if ((new_value == 0) && (old_value != 0))
1508 rt_cache_flush(net, 0);
1504 } 1509 }
1505 1510
1506 return ret; 1511 return ret;
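
Annotation: devinet_conf_proc() now snapshots the sysctl value before and after proc_dointvec() so it can flush the route cache only on the transition that matters (accept_local going from non-zero to zero). A generic sketch of the before/after capture around a proc handler; the handler name and the transition check are illustrative, proc_dointvec is the real helper used above:

    /* Sketch: react only to a specific value transition of a sysctl. */
    static int example_conf_proc(ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp, loff_t *ppos)
    {
            int old_value = *(int *)ctl->data;
            int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
            int new_value = *(int *)ctl->data;

            if (write && old_value != 0 && new_value == 0) {
                    /* value was just switched off: do the one-shot work here,
                     * e.g. rt_cache_flush(net, 0) in the hunk above */
            }
            return ret;
    }
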
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c7472eff2d51..b2ca095cb9da 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1716,7 +1716,8 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1716 if (err) { 1716 if (err) {
1717 int j; 1717 int j;
1718 1718
1719 pmc->sfcount[sfmode]--; 1719 if (!delta)
1720 pmc->sfcount[sfmode]--;
1720 for (j=0; j<i; j++) 1721 for (j=0; j<i; j++)
1721 (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]); 1722 (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
1722 } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) { 1723 } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index f5e2bdaef949..ccee270a9b65 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -108,9 +108,6 @@ static int inet_csk_diag_fill(struct sock *sk,
108 icsk->icsk_ca_ops->name); 108 icsk->icsk_ca_ops->name);
109 } 109 }
110 110
111 if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
112 RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
113
114 r->idiag_family = sk->sk_family; 111 r->idiag_family = sk->sk_family;
115 r->idiag_state = sk->sk_state; 112 r->idiag_state = sk->sk_state;
116 r->idiag_timer = 0; 113 r->idiag_timer = 0;
@@ -125,16 +122,23 @@ static int inet_csk_diag_fill(struct sock *sk,
125 r->id.idiag_src[0] = inet->inet_rcv_saddr; 122 r->id.idiag_src[0] = inet->inet_rcv_saddr;
126 r->id.idiag_dst[0] = inet->inet_daddr; 123 r->id.idiag_dst[0] = inet->inet_daddr;
127 124
125 /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
126 * hence this needs to be included regardless of socket family.
127 */
128 if (ext & (1 << (INET_DIAG_TOS - 1)))
129 RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
130
128#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 131#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
129 if (r->idiag_family == AF_INET6) { 132 if (r->idiag_family == AF_INET6) {
130 const struct ipv6_pinfo *np = inet6_sk(sk); 133 const struct ipv6_pinfo *np = inet6_sk(sk);
131 134
135 if (ext & (1 << (INET_DIAG_TCLASS - 1)))
136 RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
137
132 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, 138 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
133 &np->rcv_saddr); 139 &np->rcv_saddr);
134 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, 140 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
135 &np->daddr); 141 &np->daddr);
136 if (ext & (1 << (INET_DIAG_TOS - 1)))
137 RTA_PUT_U8(skb, INET_DIAG_TOS, np->tclass);
138 } 142 }
139#endif 143#endif
140 144
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 3b34d1c86270..29a07b6c7168 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
84 84
85 rt = skb_rtable(skb); 85 rt = skb_rtable(skb);
86 86
87 if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway) 87 if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
88 goto sr_failed; 88 goto sr_failed;
89 89
90 if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) && 90 if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index ec93335901dd..1e60f7679075 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -568,12 +568,13 @@ void ip_forward_options(struct sk_buff *skb)
568 ) { 568 ) {
569 if (srrptr + 3 > srrspace) 569 if (srrptr + 3 > srrspace)
570 break; 570 break;
571 if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0) 571 if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0)
572 break; 572 break;
573 } 573 }
574 if (srrptr + 3 <= srrspace) { 574 if (srrptr + 3 <= srrspace) {
575 opt->is_changed = 1; 575 opt->is_changed = 1;
576 ip_rt_get_source(&optptr[srrptr-1], skb, rt); 576 ip_rt_get_source(&optptr[srrptr-1], skb, rt);
577 ip_hdr(skb)->daddr = opt->nexthop;
577 optptr[2] = srrptr+4; 578 optptr[2] = srrptr+4;
578 } else if (net_ratelimit()) 579 } else if (net_ratelimit())
579 printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n"); 580 printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
@@ -640,6 +641,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
640 } 641 }
641 if (srrptr <= srrspace) { 642 if (srrptr <= srrspace) {
642 opt->srr_is_hit = 1; 643 opt->srr_is_hit = 1;
644 opt->nexthop = nexthop;
643 opt->is_changed = 1; 645 opt->is_changed = 1;
644 } 646 }
645 return 0; 647 return 0;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 065effd8349a..0b2e7329abda 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
285 if (register_netdevice(dev) < 0) 285 if (register_netdevice(dev) < 0)
286 goto failed_free; 286 goto failed_free;
287 287
288 strcpy(nt->parms.name, dev->name);
289
288 dev_hold(dev); 290 dev_hold(dev);
289 ipip_tunnel_link(ipn, nt); 291 ipip_tunnel_link(ipn, nt);
290 return nt; 292 return nt;
@@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
759 struct ip_tunnel *tunnel = netdev_priv(dev); 761 struct ip_tunnel *tunnel = netdev_priv(dev);
760 762
761 tunnel->dev = dev; 763 tunnel->dev = dev;
762 strcpy(tunnel->parms.name, dev->name);
763 764
764 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); 765 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
765 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); 766 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
825static int __net_init ipip_init_net(struct net *net) 826static int __net_init ipip_init_net(struct net *net)
826{ 827{
827 struct ipip_net *ipn = net_generic(net, ipip_net_id); 828 struct ipip_net *ipn = net_generic(net, ipip_net_id);
829 struct ip_tunnel *t;
828 int err; 830 int err;
829 831
830 ipn->tunnels[0] = ipn->tunnels_wc; 832 ipn->tunnels[0] = ipn->tunnels_wc;
@@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
848 if ((err = register_netdev(ipn->fb_tunnel_dev))) 850 if ((err = register_netdev(ipn->fb_tunnel_dev)))
849 goto err_reg_dev; 851 goto err_reg_dev;
850 852
853 t = netdev_priv(ipn->fb_tunnel_dev);
854
855 strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
851 return 0; 856 return 0;
852 857
853err_reg_dev: 858err_reg_dev:
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 9899619ab9b8..4f47e064e262 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -64,7 +64,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
64 /* Change in oif may mean change in hh_len. */ 64 /* Change in oif may mean change in hh_len. */
65 hh_len = skb_dst(skb)->dev->hard_header_len; 65 hh_len = skb_dst(skb)->dev->hard_header_len;
66 if (skb_headroom(skb) < hh_len && 66 if (skb_headroom(skb) < hh_len &&
67 pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) 67 pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
68 0, GFP_ATOMIC))
68 return -1; 69 return -1;
69 70
70 return 0; 71 return 0;
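
Annotation: ip_route_me_harder() may re-route the skb onto a device with a larger hard header; when it grows the headroom it now rounds the extra space up with HH_DATA_ALIGN() so the hard header area keeps the alignment the output path expects. Sketch of the adjusted headroom check, assuming kernel skb APIs; the wrapper name is illustrative:

    /* Sketch: grow headroom to an aligned hard-header length. */
    static int ensure_hh_headroom(struct sk_buff *skb)
    {
            unsigned int hh_len = skb_dst(skb)->dev->hard_header_len;

            if (skb_headroom(skb) < hh_len &&
                pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
                                 0, GFP_ATOMIC))
                    return -1;
            return 0;
    }
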
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 1dfc18a03fd4..f19f2182894c 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -325,7 +325,6 @@ config IP_NF_TARGET_TTL
325# raw + specific targets 325# raw + specific targets
326config IP_NF_RAW 326config IP_NF_RAW
327 tristate 'raw table support (required for NOTRACK/TRACE)' 327 tristate 'raw table support (required for NOTRACK/TRACE)'
328 depends on NETFILTER_ADVANCED
329 help 328 help
330 This option adds a `raw' table to iptables. This table is the very 329 This option adds a `raw' table to iptables. This table is the very
331 first in the netfilter framework and hooks in at the PREROUTING 330 first in the netfilter framework and hooks in at the PREROUTING
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index a06f73fdb3c0..43d4c3b22369 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -339,7 +339,6 @@ void ping_err(struct sk_buff *skb, u32 info)
339 sk = ping_v4_lookup(net, iph->daddr, iph->saddr, 339 sk = ping_v4_lookup(net, iph->daddr, iph->saddr,
340 ntohs(icmph->un.echo.id), skb->dev->ifindex); 340 ntohs(icmph->un.echo.id), skb->dev->ifindex);
341 if (sk == NULL) { 341 if (sk == NULL) {
342 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
343 pr_debug("no socket, dropping\n"); 342 pr_debug("no socket, dropping\n");
344 return; /* No socket for error */ 343 return; /* No socket for error */
345 } 344 }
@@ -679,7 +678,6 @@ static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
679 pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", 678 pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
680 inet_sk(sk), inet_sk(sk)->inet_num, skb); 679 inet_sk(sk), inet_sk(sk)->inet_num, skb);
681 if (sock_queue_rcv_skb(sk, skb) < 0) { 680 if (sock_queue_rcv_skb(sk, skb) < 0) {
682 ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_INERRORS);
683 kfree_skb(skb); 681 kfree_skb(skb);
684 pr_debug("ping_queue_rcv_skb -> failed\n"); 682 pr_debug("ping_queue_rcv_skb -> failed\n");
685 return -1; 683 return -1;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 155138d8ec8b..46af62363b8c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -112,7 +112,7 @@
112#include <net/secure_seq.h> 112#include <net/secure_seq.h>
113 113
114#define RT_FL_TOS(oldflp4) \ 114#define RT_FL_TOS(oldflp4) \
115 ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))) 115 ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
116 116
117#define IP_MAX_MTU 0xFFF0 117#define IP_MAX_MTU 0xFFF0
118 118
@@ -131,6 +131,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
131static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 131static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
132static int ip_rt_min_advmss __read_mostly = 256; 132static int ip_rt_min_advmss __read_mostly = 256;
133static int rt_chain_length_max __read_mostly = 20; 133static int rt_chain_length_max __read_mostly = 20;
134static int redirect_genid;
134 135
135/* 136/*
136 * Interface to generic destination cache. 137 * Interface to generic destination cache.
@@ -138,7 +139,7 @@ static int rt_chain_length_max __read_mostly = 20;
138 139
139static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie); 140static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
140static unsigned int ipv4_default_advmss(const struct dst_entry *dst); 141static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
141static unsigned int ipv4_default_mtu(const struct dst_entry *dst); 142static unsigned int ipv4_mtu(const struct dst_entry *dst);
142static void ipv4_dst_destroy(struct dst_entry *dst); 143static void ipv4_dst_destroy(struct dst_entry *dst);
143static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); 144static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
144static void ipv4_link_failure(struct sk_buff *skb); 145static void ipv4_link_failure(struct sk_buff *skb);
@@ -193,7 +194,7 @@ static struct dst_ops ipv4_dst_ops = {
193 .gc = rt_garbage_collect, 194 .gc = rt_garbage_collect,
194 .check = ipv4_dst_check, 195 .check = ipv4_dst_check,
195 .default_advmss = ipv4_default_advmss, 196 .default_advmss = ipv4_default_advmss,
196 .default_mtu = ipv4_default_mtu, 197 .mtu = ipv4_mtu,
197 .cow_metrics = ipv4_cow_metrics, 198 .cow_metrics = ipv4_cow_metrics,
198 .destroy = ipv4_dst_destroy, 199 .destroy = ipv4_dst_destroy,
199 .ifdown = ipv4_dst_ifdown, 200 .ifdown = ipv4_dst_ifdown,
@@ -416,9 +417,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
416 else { 417 else {
417 struct rtable *r = v; 418 struct rtable *r = v;
418 struct neighbour *n; 419 struct neighbour *n;
419 int len; 420 int len, HHUptod;
420 421
422 rcu_read_lock();
421 n = dst_get_neighbour(&r->dst); 423 n = dst_get_neighbour(&r->dst);
424 HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
425 rcu_read_unlock();
426
422 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t" 427 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
423 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", 428 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
424 r->dst.dev ? r->dst.dev->name : "*", 429 r->dst.dev ? r->dst.dev->name : "*",
@@ -432,7 +437,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
432 dst_metric(&r->dst, RTAX_RTTVAR)), 437 dst_metric(&r->dst, RTAX_RTTVAR)),
433 r->rt_key_tos, 438 r->rt_key_tos,
434 -1, 439 -1,
435 (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0, 440 HHUptod,
436 r->rt_spec_dst, &len); 441 r->rt_spec_dst, &len);
437 442
438 seq_printf(seq, "%*s\n", 127 - len, ""); 443 seq_printf(seq, "%*s\n", 127 - len, "");
@@ -837,6 +842,7 @@ static void rt_cache_invalidate(struct net *net)
837 842
838 get_random_bytes(&shuffle, sizeof(shuffle)); 843 get_random_bytes(&shuffle, sizeof(shuffle));
839 atomic_add(shuffle + 1U, &net->ipv4.rt_genid); 844 atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
845 redirect_genid++;
840} 846}
841 847
842/* 848/*
@@ -1304,16 +1310,40 @@ static void rt_del(unsigned hash, struct rtable *rt)
1304 spin_unlock_bh(rt_hash_lock_addr(hash)); 1310 spin_unlock_bh(rt_hash_lock_addr(hash));
1305} 1311}
1306 1312
1313static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
1314{
1315 struct rtable *rt = (struct rtable *) dst;
1316 __be32 orig_gw = rt->rt_gateway;
1317 struct neighbour *n, *old_n;
1318
1319 dst_confirm(&rt->dst);
1320
1321 rt->rt_gateway = peer->redirect_learned.a4;
1322
1323 n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
1324 if (IS_ERR(n)) {
1325 rt->rt_gateway = orig_gw;
1326 return;
1327 }
1328 old_n = xchg(&rt->dst._neighbour, n);
1329 if (old_n)
1330 neigh_release(old_n);
1331 if (!(n->nud_state & NUD_VALID)) {
1332 neigh_event_send(n, NULL);
1333 } else {
1334 rt->rt_flags |= RTCF_REDIRECTED;
1335 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
1336 }
1337}
1338
1307/* called in rcu_read_lock() section */ 1339/* called in rcu_read_lock() section */
1308void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, 1340void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1309 __be32 saddr, struct net_device *dev) 1341 __be32 saddr, struct net_device *dev)
1310{ 1342{
1311 int s, i; 1343 int s, i;
1312 struct in_device *in_dev = __in_dev_get_rcu(dev); 1344 struct in_device *in_dev = __in_dev_get_rcu(dev);
1313 struct rtable *rt;
1314 __be32 skeys[2] = { saddr, 0 }; 1345 __be32 skeys[2] = { saddr, 0 };
1315 int ikeys[2] = { dev->ifindex, 0 }; 1346 int ikeys[2] = { dev->ifindex, 0 };
1316 struct flowi4 fl4;
1317 struct inet_peer *peer; 1347 struct inet_peer *peer;
1318 struct net *net; 1348 struct net *net;
1319 1349
@@ -1336,33 +1366,44 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1336 goto reject_redirect; 1366 goto reject_redirect;
1337 } 1367 }
1338 1368
1339 memset(&fl4, 0, sizeof(fl4));
1340 fl4.daddr = daddr;
1341 for (s = 0; s < 2; s++) { 1369 for (s = 0; s < 2; s++) {
1342 for (i = 0; i < 2; i++) { 1370 for (i = 0; i < 2; i++) {
1343 fl4.flowi4_oif = ikeys[i]; 1371 unsigned int hash;
1344 fl4.saddr = skeys[s]; 1372 struct rtable __rcu **rthp;
1345 rt = __ip_route_output_key(net, &fl4); 1373 struct rtable *rt;
1346 if (IS_ERR(rt)) 1374
1347 continue; 1375 hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));
1348 1376
1349 if (rt->dst.error || rt->dst.dev != dev || 1377 rthp = &rt_hash_table[hash].chain;
1350 rt->rt_gateway != old_gw) { 1378
1351 ip_rt_put(rt); 1379 while ((rt = rcu_dereference(*rthp)) != NULL) {
1352 continue; 1380 rthp = &rt->dst.rt_next;
1353 } 1381
1354 1382 if (rt->rt_key_dst != daddr ||
1355 if (!rt->peer) 1383 rt->rt_key_src != skeys[s] ||
1356 rt_bind_peer(rt, rt->rt_dst, 1); 1384 rt->rt_oif != ikeys[i] ||
1385 rt_is_input_route(rt) ||
1386 rt_is_expired(rt) ||
1387 !net_eq(dev_net(rt->dst.dev), net) ||
1388 rt->dst.error ||
1389 rt->dst.dev != dev ||
1390 rt->rt_gateway != old_gw)
1391 continue;
1357 1392
1358 peer = rt->peer; 1393 if (!rt->peer)
1359 if (peer) { 1394 rt_bind_peer(rt, rt->rt_dst, 1);
1360 peer->redirect_learned.a4 = new_gw; 1395
1361 atomic_inc(&__rt_peer_genid); 1396 peer = rt->peer;
1397 if (peer) {
1398 if (peer->redirect_learned.a4 != new_gw ||
1399 peer->redirect_genid != redirect_genid) {
1400 peer->redirect_learned.a4 = new_gw;
1401 peer->redirect_genid = redirect_genid;
1402 atomic_inc(&__rt_peer_genid);
1403 }
1404 check_peer_redir(&rt->dst, peer);
1405 }
1362 } 1406 }
1363
1364 ip_rt_put(rt);
1365 return;
1366 } 1407 }
1367 } 1408 }
1368 return; 1409 return;
@@ -1649,40 +1690,9 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1649 } 1690 }
1650} 1691}
1651 1692
1652static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
1653{
1654 struct rtable *rt = (struct rtable *) dst;
1655 __be32 orig_gw = rt->rt_gateway;
1656 struct neighbour *n, *old_n;
1657
1658 dst_confirm(&rt->dst);
1659
1660 rt->rt_gateway = peer->redirect_learned.a4;
1661
1662 n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
1663 if (IS_ERR(n))
1664 return PTR_ERR(n);
1665 old_n = xchg(&rt->dst._neighbour, n);
1666 if (old_n)
1667 neigh_release(old_n);
1668 if (!n || !(n->nud_state & NUD_VALID)) {
1669 if (n)
1670 neigh_event_send(n, NULL);
1671 rt->rt_gateway = orig_gw;
1672 return -EAGAIN;
1673 } else {
1674 rt->rt_flags |= RTCF_REDIRECTED;
1675 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
1676 }
1677 return 0;
1678}
1679 1693
1680static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) 1694static void ipv4_validate_peer(struct rtable *rt)
1681{ 1695{
1682 struct rtable *rt = (struct rtable *) dst;
1683
1684 if (rt_is_expired(rt))
1685 return NULL;
1686 if (rt->rt_peer_genid != rt_peer_genid()) { 1696 if (rt->rt_peer_genid != rt_peer_genid()) {
1687 struct inet_peer *peer; 1697 struct inet_peer *peer;
1688 1698
@@ -1691,17 +1701,26 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1691 1701
1692 peer = rt->peer; 1702 peer = rt->peer;
1693 if (peer) { 1703 if (peer) {
1694 check_peer_pmtu(dst, peer); 1704 check_peer_pmtu(&rt->dst, peer);
1695 1705
1706 if (peer->redirect_genid != redirect_genid)
1707 peer->redirect_learned.a4 = 0;
1696 if (peer->redirect_learned.a4 && 1708 if (peer->redirect_learned.a4 &&
1697 peer->redirect_learned.a4 != rt->rt_gateway) { 1709 peer->redirect_learned.a4 != rt->rt_gateway)
1698 if (check_peer_redir(dst, peer)) 1710 check_peer_redir(&rt->dst, peer);
1699 return NULL;
1700 }
1701 } 1711 }
1702 1712
1703 rt->rt_peer_genid = rt_peer_genid(); 1713 rt->rt_peer_genid = rt_peer_genid();
1704 } 1714 }
1715}
1716
1717static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1718{
1719 struct rtable *rt = (struct rtable *) dst;
1720
1721 if (rt_is_expired(rt))
1722 return NULL;
1723 ipv4_validate_peer(rt);
1705 return dst; 1724 return dst;
1706} 1725}
1707 1726
@@ -1806,12 +1825,17 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1806 return advmss; 1825 return advmss;
1807} 1826}
1808 1827
1809static unsigned int ipv4_default_mtu(const struct dst_entry *dst) 1828static unsigned int ipv4_mtu(const struct dst_entry *dst)
1810{ 1829{
1811 unsigned int mtu = dst->dev->mtu; 1830 const struct rtable *rt = (const struct rtable *) dst;
1831 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1832
1833 if (mtu && rt_is_output_route(rt))
1834 return mtu;
1835
1836 mtu = dst->dev->mtu;
1812 1837
1813 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { 1838 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1814 const struct rtable *rt = (const struct rtable *) dst;
1815 1839
1816 if (rt->rt_gateway != rt->rt_dst && mtu > 576) 1840 if (rt->rt_gateway != rt->rt_dst && mtu > 576)
1817 mtu = 576; 1841 mtu = 576;
@@ -1844,6 +1868,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1844 dst_init_metrics(&rt->dst, peer->metrics, false); 1868 dst_init_metrics(&rt->dst, peer->metrics, false);
1845 1869
1846 check_peer_pmtu(&rt->dst, peer); 1870 check_peer_pmtu(&rt->dst, peer);
1871 if (peer->redirect_genid != redirect_genid)
1872 peer->redirect_learned.a4 = 0;
1847 if (peer->redirect_learned.a4 && 1873 if (peer->redirect_learned.a4 &&
1848 peer->redirect_learned.a4 != rt->rt_gateway) { 1874 peer->redirect_learned.a4 != rt->rt_gateway) {
1849 rt->rt_gateway = peer->redirect_learned.a4; 1875 rt->rt_gateway = peer->redirect_learned.a4;
@@ -2349,6 +2375,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2349 rth->rt_mark == skb->mark && 2375 rth->rt_mark == skb->mark &&
2350 net_eq(dev_net(rth->dst.dev), net) && 2376 net_eq(dev_net(rth->dst.dev), net) &&
2351 !rt_is_expired(rth)) { 2377 !rt_is_expired(rth)) {
2378 ipv4_validate_peer(rth);
2352 if (noref) { 2379 if (noref) {
2353 dst_use_noref(&rth->dst, jiffies); 2380 dst_use_noref(&rth->dst, jiffies);
2354 skb_dst_set_noref(skb, &rth->dst); 2381 skb_dst_set_noref(skb, &rth->dst);
@@ -2407,11 +2434,11 @@ EXPORT_SYMBOL(ip_route_input_common);
2407static struct rtable *__mkroute_output(const struct fib_result *res, 2434static struct rtable *__mkroute_output(const struct fib_result *res,
2408 const struct flowi4 *fl4, 2435 const struct flowi4 *fl4,
2409 __be32 orig_daddr, __be32 orig_saddr, 2436 __be32 orig_daddr, __be32 orig_saddr,
2410 int orig_oif, struct net_device *dev_out, 2437 int orig_oif, __u8 orig_rtos,
2438 struct net_device *dev_out,
2411 unsigned int flags) 2439 unsigned int flags)
2412{ 2440{
2413 struct fib_info *fi = res->fi; 2441 struct fib_info *fi = res->fi;
2414 u32 tos = RT_FL_TOS(fl4);
2415 struct in_device *in_dev; 2442 struct in_device *in_dev;
2416 u16 type = res->type; 2443 u16 type = res->type;
2417 struct rtable *rth; 2444 struct rtable *rth;
@@ -2462,7 +2489,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2462 rth->rt_genid = rt_genid(dev_net(dev_out)); 2489 rth->rt_genid = rt_genid(dev_net(dev_out));
2463 rth->rt_flags = flags; 2490 rth->rt_flags = flags;
2464 rth->rt_type = type; 2491 rth->rt_type = type;
2465 rth->rt_key_tos = tos; 2492 rth->rt_key_tos = orig_rtos;
2466 rth->rt_dst = fl4->daddr; 2493 rth->rt_dst = fl4->daddr;
2467 rth->rt_src = fl4->saddr; 2494 rth->rt_src = fl4->saddr;
2468 rth->rt_route_iif = 0; 2495 rth->rt_route_iif = 0;
@@ -2512,7 +2539,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2512static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4) 2539static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2513{ 2540{
2514 struct net_device *dev_out = NULL; 2541 struct net_device *dev_out = NULL;
2515 u32 tos = RT_FL_TOS(fl4); 2542 __u8 tos = RT_FL_TOS(fl4);
2516 unsigned int flags = 0; 2543 unsigned int flags = 0;
2517 struct fib_result res; 2544 struct fib_result res;
2518 struct rtable *rth; 2545 struct rtable *rth;
@@ -2688,7 +2715,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2688 2715
2689make_route: 2716make_route:
2690 rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif, 2717 rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
2691 dev_out, flags); 2718 tos, dev_out, flags);
2692 if (!IS_ERR(rth)) { 2719 if (!IS_ERR(rth)) {
2693 unsigned int hash; 2720 unsigned int hash;
2694 2721
@@ -2724,6 +2751,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
2724 (IPTOS_RT_MASK | RTO_ONLINK)) && 2751 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2725 net_eq(dev_net(rth->dst.dev), net) && 2752 net_eq(dev_net(rth->dst.dev), net) &&
2726 !rt_is_expired(rth)) { 2753 !rt_is_expired(rth)) {
2754 ipv4_validate_peer(rth);
2727 dst_use(&rth->dst, jiffies); 2755 dst_use(&rth->dst, jiffies);
2728 RT_CACHE_STAT_INC(out_hit); 2756 RT_CACHE_STAT_INC(out_hit);
2729 rcu_read_unlock_bh(); 2757 rcu_read_unlock_bh();
@@ -2747,9 +2775,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
2747 return NULL; 2775 return NULL;
2748} 2776}
2749 2777
2750static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst) 2778static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2751{ 2779{
2752 return 0; 2780 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2781
2782 return mtu ? : dst->dev->mtu;
2753} 2783}
2754 2784
2755static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) 2785static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -2767,7 +2797,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
2767 .protocol = cpu_to_be16(ETH_P_IP), 2797 .protocol = cpu_to_be16(ETH_P_IP),
2768 .destroy = ipv4_dst_destroy, 2798 .destroy = ipv4_dst_destroy,
2769 .check = ipv4_blackhole_dst_check, 2799 .check = ipv4_blackhole_dst_check,
2770 .default_mtu = ipv4_blackhole_default_mtu, 2800 .mtu = ipv4_blackhole_mtu,
2771 .default_advmss = ipv4_default_advmss, 2801 .default_advmss = ipv4_default_advmss,
2772 .update_pmtu = ipv4_rt_blackhole_update_pmtu, 2802 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2773 .cow_metrics = ipv4_rt_blackhole_cow_metrics, 2803 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
@@ -2845,7 +2875,7 @@ static int rt_fill_info(struct net *net,
2845 struct rtable *rt = skb_rtable(skb); 2875 struct rtable *rt = skb_rtable(skb);
2846 struct rtmsg *r; 2876 struct rtmsg *r;
2847 struct nlmsghdr *nlh; 2877 struct nlmsghdr *nlh;
2848 long expires = 0; 2878 unsigned long expires = 0;
2849 const struct inet_peer *peer = rt->peer; 2879 const struct inet_peer *peer = rt->peer;
2850 u32 id = 0, ts = 0, tsage = 0, error; 2880 u32 id = 0, ts = 0, tsage = 0, error;
2851 2881
@@ -2902,8 +2932,12 @@ static int rt_fill_info(struct net *net,
2902 tsage = get_seconds() - peer->tcp_ts_stamp; 2932 tsage = get_seconds() - peer->tcp_ts_stamp;
2903 } 2933 }
2904 expires = ACCESS_ONCE(peer->pmtu_expires); 2934 expires = ACCESS_ONCE(peer->pmtu_expires);
2905 if (expires) 2935 if (expires) {
2906 expires -= jiffies; 2936 if (time_before(jiffies, expires))
2937 expires -= jiffies;
2938 else
2939 expires = 0;
2940 }
2907 } 2941 }
2908 2942
2909 if (rt_is_input_route(rt)) { 2943 if (rt_is_input_route(rt)) {
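
The rt_fill_info() change above clamps the relative expiry rather than blindly subtracting jiffies; a minimal user-space sketch of that clamp follows (illustrative only: time_left() is a stand-in, and the kernel's time_before() additionally handles jiffies wraparound, which the plain comparison here does not).

#include <stdio.h>

/* Report "time left until expiry" as zero once the deadline has passed,
 * instead of letting the unsigned subtraction wrap to a huge bogus value,
 * mirroring the clamp the patch adds around "expires -= jiffies". */
static unsigned long time_left(unsigned long now, unsigned long expires)
{
        return (now < expires) ? expires - now : 0;
}

int main(void)
{
        unsigned long now = 1000;

        printf("still valid: %lu\n", time_left(now, 1500)); /* 500 */
        printf("expired:     %lu\n", time_left(now, 900));  /* 0, not ~ULONG_MAX */
        return 0;
}
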
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a7443159c400..a9db4b1a2215 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1510,6 +1510,7 @@ exit:
1510 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1510 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1511 return NULL; 1511 return NULL;
1512put_and_exit: 1512put_and_exit:
1513 tcp_clear_xmit_timers(newsk);
1513 bh_unlock_sock(newsk); 1514 bh_unlock_sock(newsk);
1514 sock_put(newsk); 1515 sock_put(newsk);
1515 goto exit; 1516 goto exit;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 980b98f6288c..63170e297540 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1382,7 +1382,7 @@ static inline int tcp_minshall_check(const struct tcp_sock *tp)
1382/* Return 0, if packet can be sent now without violation Nagle's rules: 1382/* Return 0, if packet can be sent now without violation Nagle's rules:
1383 * 1. It is full sized. 1383 * 1. It is full sized.
1384 * 2. Or it contains FIN. (already checked by caller) 1384 * 2. Or it contains FIN. (already checked by caller)
1385 * 3. Or TCP_NODELAY was set. 1385 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1386 * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 1386 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1387 * With Minshall's modification: all sent small packets are ACKed. 1387 * With Minshall's modification: all sent small packets are ACKed.
1388 */ 1388 */
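
The comment fix above reflects what the Nagle test actually checks: a small segment may leave immediately when TCP_CORK is off and TCP_NODELAY is on. A minimal user-space illustration of toggling those two socket options follows (standard Linux setsockopt() calls; the socket is only created, never connected, so the example stays self-contained).

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int one = 1, zero = 0;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) { perror("socket"); return 1; }

        /* Disable Nagle: small writes go out immediately (rule 3 above). */
        setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));

        /* TCP_CORK would override that and hold partial frames back;
         * keep it off so TCP_NODELAY takes effect. */
        setsockopt(fd, IPPROTO_TCP, TCP_CORK, &zero, sizeof(zero));

        close(fd);
        return 0;
}
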
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ab0966df1e2a..5a65eeac1d29 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1164,7 +1164,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1164 struct inet_sock *inet = inet_sk(sk); 1164 struct inet_sock *inet = inet_sk(sk);
1165 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; 1165 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
1166 struct sk_buff *skb; 1166 struct sk_buff *skb;
1167 unsigned int ulen; 1167 unsigned int ulen, copied;
1168 int peeked; 1168 int peeked;
1169 int err; 1169 int err;
1170 int is_udplite = IS_UDPLITE(sk); 1170 int is_udplite = IS_UDPLITE(sk);
@@ -1186,9 +1186,10 @@ try_again:
1186 goto out; 1186 goto out;
1187 1187
1188 ulen = skb->len - sizeof(struct udphdr); 1188 ulen = skb->len - sizeof(struct udphdr);
1189 if (len > ulen) 1189 copied = len;
1190 len = ulen; 1190 if (copied > ulen)
1191 else if (len < ulen) 1191 copied = ulen;
1192 else if (copied < ulen)
1192 msg->msg_flags |= MSG_TRUNC; 1193 msg->msg_flags |= MSG_TRUNC;
1193 1194
1194 /* 1195 /*
@@ -1197,14 +1198,14 @@ try_again:
1197 * coverage checksum (UDP-Lite), do it before the copy. 1198 * coverage checksum (UDP-Lite), do it before the copy.
1198 */ 1199 */
1199 1200
1200 if (len < ulen || UDP_SKB_CB(skb)->partial_cov) { 1201 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
1201 if (udp_lib_checksum_complete(skb)) 1202 if (udp_lib_checksum_complete(skb))
1202 goto csum_copy_err; 1203 goto csum_copy_err;
1203 } 1204 }
1204 1205
1205 if (skb_csum_unnecessary(skb)) 1206 if (skb_csum_unnecessary(skb))
1206 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), 1207 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
1207 msg->msg_iov, len); 1208 msg->msg_iov, copied);
1208 else { 1209 else {
1209 err = skb_copy_and_csum_datagram_iovec(skb, 1210 err = skb_copy_and_csum_datagram_iovec(skb,
1210 sizeof(struct udphdr), 1211 sizeof(struct udphdr),
@@ -1233,7 +1234,7 @@ try_again:
1233 if (inet->cmsg_flags) 1234 if (inet->cmsg_flags)
1234 ip_cmsg_recv(msg, skb); 1235 ip_cmsg_recv(msg, skb);
1235 1236
1236 err = len; 1237 err = copied;
1237 if (flags & MSG_TRUNC) 1238 if (flags & MSG_TRUNC)
1238 err = ulen; 1239 err = ulen;
1239 1240
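
The udp_recvmsg() hunk above keeps the caller's len untouched and tracks the possibly truncated amount in copied, so the MSG_TRUNC flag and the return value stay consistent with what was actually delivered. From user space the same truncation is visible as below (a hedged sketch: the port number and payload are arbitrary, and error handling is trimmed).

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in addr = { .sin_family = AF_INET,
                                    .sin_port = htons(9999),
                                    .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        char small[8];                          /* smaller than the datagram */
        struct iovec iov = { small, sizeof(small) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
        ssize_t n;

        bind(fd, (struct sockaddr *)&addr, sizeof(addr));
        /* send a 32-byte datagram to ourselves */
        sendto(fd, "0123456789abcdef0123456789abcdef", 32, 0,
               (struct sockaddr *)&addr, sizeof(addr));

        n = recvmsg(fd, &msg, 0);
        printf("copied %zd bytes, truncated: %s\n", n,
               (msg.msg_flags & MSG_TRUNC) ? "yes" : "no");
        close(fd);
        return 0;
}
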
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cf88df82e2c2..36806def8cfd 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1805,7 +1805,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
1805 return ERR_PTR(-EACCES); 1805 return ERR_PTR(-EACCES);
1806 1806
1807 /* Add default multicast route */ 1807 /* Add default multicast route */
1808 addrconf_add_mroute(dev); 1808 if (!(dev->flags & IFF_LOOPBACK))
1809 addrconf_add_mroute(dev);
1809 1810
1810 /* Add link local route */ 1811 /* Add link local route */
1811 addrconf_add_lroute(dev); 1812 addrconf_add_lroute(dev);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 2195ae651923..4c0f894d0843 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -324,8 +324,6 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
324#endif 324#endif
325 } 325 }
326 326
327 err = ah->nexthdr;
328
329 kfree(AH_SKB_CB(skb)->tmp); 327 kfree(AH_SKB_CB(skb)->tmp);
330 xfrm_output_resume(skb, err); 328 xfrm_output_resume(skb, err);
331} 329}
@@ -466,12 +464,12 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
466 if (err) 464 if (err)
467 goto out; 465 goto out;
468 466
467 err = ah->nexthdr;
468
469 skb->network_header += ah_hlen; 469 skb->network_header += ah_hlen;
470 memcpy(skb_network_header(skb), work_iph, hdr_len); 470 memcpy(skb_network_header(skb), work_iph, hdr_len);
471 __skb_pull(skb, ah_hlen + hdr_len); 471 __skb_pull(skb, ah_hlen + hdr_len);
472 skb_set_transport_header(skb, -hdr_len); 472 skb_set_transport_header(skb, -hdr_len);
473
474 err = ah->nexthdr;
475out: 473out:
476 kfree(AH_SKB_CB(skb)->tmp); 474 kfree(AH_SKB_CB(skb)->tmp);
477 xfrm_input_resume(skb, err); 475 xfrm_input_resume(skb, err);
@@ -583,8 +581,6 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
583 if (err == -EINPROGRESS) 581 if (err == -EINPROGRESS)
584 goto out; 582 goto out;
585 583
586 if (err == -EBUSY)
587 err = NET_XMIT_DROP;
588 goto out_free; 584 goto out_free;
589 } 585 }
590 586
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index fee46d5a2f12..1567fb120392 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -85,7 +85,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
85 * request_sock (formerly open request) hash tables. 85 * request_sock (formerly open request) hash tables.
86 */ 86 */
87static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport, 87static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
88 const u32 rnd, const u16 synq_hsize) 88 const u32 rnd, const u32 synq_hsize)
89{ 89{
90 u32 c; 90 u32 c;
91 91
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 027c7ff6f1e5..a46c64eb0a66 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -111,6 +111,14 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
111 ipv6_addr_loopback(&hdr->daddr)) 111 ipv6_addr_loopback(&hdr->daddr))
112 goto err; 112 goto err;
113 113
114 /*
115 * RFC4291 2.7
116 * Multicast addresses must not be used as source addresses in IPv6
117 * packets or appear in any Routing header.
118 */
119 if (ipv6_addr_is_multicast(&hdr->saddr))
120 goto err;
121
114 skb->transport_header = skb->network_header + sizeof(*hdr); 122 skb->transport_header = skb->network_header + sizeof(*hdr);
115 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); 123 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
116 124
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index bdc15c9003d7..4e2e9ff67ef2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -289,6 +289,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
289 if ((err = register_netdevice(dev)) < 0) 289 if ((err = register_netdevice(dev)) < 0)
290 goto failed_free; 290 goto failed_free;
291 291
292 strcpy(t->parms.name, dev->name);
293
292 dev_hold(dev); 294 dev_hold(dev);
293 ip6_tnl_link(ip6n, t); 295 ip6_tnl_link(ip6n, t);
294 return t; 296 return t;
@@ -1407,7 +1409,6 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
1407 struct ip6_tnl *t = netdev_priv(dev); 1409 struct ip6_tnl *t = netdev_priv(dev);
1408 1410
1409 t->dev = dev; 1411 t->dev = dev;
1410 strcpy(t->parms.name, dev->name);
1411 dev->tstats = alloc_percpu(struct pcpu_tstats); 1412 dev->tstats = alloc_percpu(struct pcpu_tstats);
1412 if (!dev->tstats) 1413 if (!dev->tstats)
1413 return -ENOMEM; 1414 return -ENOMEM;
@@ -1487,6 +1488,7 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1487static int __net_init ip6_tnl_init_net(struct net *net) 1488static int __net_init ip6_tnl_init_net(struct net *net)
1488{ 1489{
1489 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1490 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1491 struct ip6_tnl *t = NULL;
1490 int err; 1492 int err;
1491 1493
1492 ip6n->tnls[0] = ip6n->tnls_wc; 1494 ip6n->tnls[0] = ip6n->tnls_wc;
@@ -1507,6 +1509,10 @@ static int __net_init ip6_tnl_init_net(struct net *net)
1507 err = register_netdev(ip6n->fb_tnl_dev); 1509 err = register_netdev(ip6n->fb_tnl_dev);
1508 if (err < 0) 1510 if (err < 0)
1509 goto err_register; 1511 goto err_register;
1512
1513 t = netdev_priv(ip6n->fb_tnl_dev);
1514
1515 strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
1510 return 0; 1516 return 0;
1511 1517
1512err_register: 1518err_register:
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index c99e3ee9781f..26cb08c84b74 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -503,7 +503,7 @@ done:
503 goto e_inval; 503 goto e_inval;
504 if (val > 255 || val < -1) 504 if (val > 255 || val < -1)
505 goto e_inval; 505 goto e_inval;
506 np->mcast_hops = val; 506 np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
507 retv = 0; 507 retv = 0;
508 break; 508 break;
509 509
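
With the hunk above, passing -1 for IPV6_MULTICAST_HOPS stores IPV6_DEFAULT_MCASTHOPS (1) rather than the raw -1 sentinel, so later reads of np->mcast_hops see the real default. The user-space side of that contract looks roughly like this (standard sockets API; on kernels carrying the fix the getsockopt() should report 1).

#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET6, SOCK_DGRAM, 0);
        int hops = -1;          /* -1 asks for the default multicast hop limit */
        socklen_t len = sizeof(hops);

        if (setsockopt(fd, IPPROTO_IPV6, IPV6_MULTICAST_HOPS,
                       &hops, sizeof(hops)) < 0)
                perror("setsockopt");

        getsockopt(fd, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, &hops, &len);
        printf("multicast hops now %d\n", hops);
        close(fd);
        return 0;
}
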
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 44e5b7f2a6c1..0cb78d7ddaf5 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1571,7 +1571,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1571 } 1571 }
1572 if (!rt->rt6i_peer) 1572 if (!rt->rt6i_peer)
1573 rt6_bind_peer(rt, 1); 1573 rt6_bind_peer(rt, 1);
1574 if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) 1574 if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
1575 goto release; 1575 goto release;
1576 1576
1577 if (dev->addr_len) { 1577 if (dev->addr_len) {
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 448464844a25..f792b34cbe9c 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -186,7 +186,6 @@ config IP6_NF_MANGLE
186 186
187config IP6_NF_RAW 187config IP6_NF_RAW
188 tristate 'raw table support (required for TRACE)' 188 tristate 'raw table support (required for TRACE)'
189 depends on NETFILTER_ADVANCED
190 help 189 help
191 This option adds a `raw' table to ip6tables. This table is the very 190 This option adds a `raw' table to ip6tables. This table is the very
192 first in the netfilter framework and hooks in at the PREROUTING 191 first in the netfilter framework and hooks in at the PREROUTING
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8473016bba4a..b582a0a0f1c5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -77,7 +77,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
77 const struct in6_addr *dest); 77 const struct in6_addr *dest);
78static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); 78static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
79static unsigned int ip6_default_advmss(const struct dst_entry *dst); 79static unsigned int ip6_default_advmss(const struct dst_entry *dst);
80static unsigned int ip6_default_mtu(const struct dst_entry *dst); 80static unsigned int ip6_mtu(const struct dst_entry *dst);
81static struct dst_entry *ip6_negative_advice(struct dst_entry *); 81static struct dst_entry *ip6_negative_advice(struct dst_entry *);
82static void ip6_dst_destroy(struct dst_entry *); 82static void ip6_dst_destroy(struct dst_entry *);
83static void ip6_dst_ifdown(struct dst_entry *, 83static void ip6_dst_ifdown(struct dst_entry *,
@@ -144,7 +144,7 @@ static struct dst_ops ip6_dst_ops_template = {
144 .gc_thresh = 1024, 144 .gc_thresh = 1024,
145 .check = ip6_dst_check, 145 .check = ip6_dst_check,
146 .default_advmss = ip6_default_advmss, 146 .default_advmss = ip6_default_advmss,
147 .default_mtu = ip6_default_mtu, 147 .mtu = ip6_mtu,
148 .cow_metrics = ipv6_cow_metrics, 148 .cow_metrics = ipv6_cow_metrics,
149 .destroy = ip6_dst_destroy, 149 .destroy = ip6_dst_destroy,
150 .ifdown = ip6_dst_ifdown, 150 .ifdown = ip6_dst_ifdown,
@@ -155,9 +155,11 @@ static struct dst_ops ip6_dst_ops_template = {
155 .neigh_lookup = ip6_neigh_lookup, 155 .neigh_lookup = ip6_neigh_lookup,
156}; 156};
157 157
158static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst) 158static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
159{ 159{
160 return 0; 160 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
161
162 return mtu ? : dst->dev->mtu;
161} 163}
162 164
163static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) 165static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -175,7 +177,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
175 .protocol = cpu_to_be16(ETH_P_IPV6), 177 .protocol = cpu_to_be16(ETH_P_IPV6),
176 .destroy = ip6_dst_destroy, 178 .destroy = ip6_dst_destroy,
177 .check = ip6_dst_check, 179 .check = ip6_dst_check,
178 .default_mtu = ip6_blackhole_default_mtu, 180 .mtu = ip6_blackhole_mtu,
179 .default_advmss = ip6_default_advmss, 181 .default_advmss = ip6_default_advmss,
180 .update_pmtu = ip6_rt_blackhole_update_pmtu, 182 .update_pmtu = ip6_rt_blackhole_update_pmtu,
181 .cow_metrics = ip6_rt_blackhole_cow_metrics, 183 .cow_metrics = ip6_rt_blackhole_cow_metrics,
@@ -726,7 +728,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
726 int attempts = !in_softirq(); 728 int attempts = !in_softirq();
727 729
728 if (!(rt->rt6i_flags&RTF_GATEWAY)) { 730 if (!(rt->rt6i_flags&RTF_GATEWAY)) {
729 if (rt->rt6i_dst.plen != 128 && 731 if (ort->rt6i_dst.plen != 128 &&
730 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) 732 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
731 rt->rt6i_flags |= RTF_ANYCAST; 733 rt->rt6i_flags |= RTF_ANYCAST;
732 ipv6_addr_copy(&rt->rt6i_gateway, daddr); 734 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
@@ -1041,10 +1043,15 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1041 return mtu; 1043 return mtu;
1042} 1044}
1043 1045
1044static unsigned int ip6_default_mtu(const struct dst_entry *dst) 1046static unsigned int ip6_mtu(const struct dst_entry *dst)
1045{ 1047{
1046 unsigned int mtu = IPV6_MIN_MTU;
1047 struct inet6_dev *idev; 1048 struct inet6_dev *idev;
1049 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1050
1051 if (mtu)
1052 return mtu;
1053
1054 mtu = IPV6_MIN_MTU;
1048 1055
1049 rcu_read_lock(); 1056 rcu_read_lock();
1050 idev = __in6_dev_get(dst->dev); 1057 idev = __in6_dev_get(dst->dev);
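
Both blackhole hunks (IPv4 earlier and IPv6 here) replace a default_mtu stub that returned 0 with an mtu method that prefers the cached RTAX_MTU metric and falls back to the device MTU. The "mtu ? : dev->mtu" form is GCC's conditional-with-omitted-operand extension; a stand-alone sketch of that fallback idiom, with hypothetical names, is below (builds with gcc or clang, both of which accept the extension).

#include <stdio.h>

/* Hypothetical stand-ins for dst_metric_raw(dst, RTAX_MTU) and dev->mtu. */
static unsigned int cached_metric_mtu;          /* 0 means "no metric cached" */
static unsigned int device_mtu = 1500;

static unsigned int effective_mtu(void)
{
        unsigned int mtu = cached_metric_mtu;

        /* GCC extension: "mtu ? : device_mtu" means "mtu ? mtu : device_mtu". */
        return mtu ? : device_mtu;
}

int main(void)
{
        printf("no metric  -> %u\n", effective_mtu());  /* falls back to 1500 */
        cached_metric_mtu = 1280;
        printf("metric set -> %u\n", effective_mtu());  /* 1280 */
        return 0;
}
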
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a7a18602a046..96f3623618e3 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
263 if (register_netdevice(dev) < 0) 263 if (register_netdevice(dev) < 0)
264 goto failed_free; 264 goto failed_free;
265 265
266 strcpy(nt->parms.name, dev->name);
267
266 dev_hold(dev); 268 dev_hold(dev);
267 269
268 ipip6_tunnel_link(sitn, nt); 270 ipip6_tunnel_link(sitn, nt);
@@ -1144,7 +1146,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
1144 struct ip_tunnel *tunnel = netdev_priv(dev); 1146 struct ip_tunnel *tunnel = netdev_priv(dev);
1145 1147
1146 tunnel->dev = dev; 1148 tunnel->dev = dev;
1147 strcpy(tunnel->parms.name, dev->name);
1148 1149
1149 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); 1150 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1150 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); 1151 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -1207,6 +1208,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
1207static int __net_init sit_init_net(struct net *net) 1208static int __net_init sit_init_net(struct net *net)
1208{ 1209{
1209 struct sit_net *sitn = net_generic(net, sit_net_id); 1210 struct sit_net *sitn = net_generic(net, sit_net_id);
1211 struct ip_tunnel *t;
1210 int err; 1212 int err;
1211 1213
1212 sitn->tunnels[0] = sitn->tunnels_wc; 1214 sitn->tunnels[0] = sitn->tunnels_wc;
@@ -1231,6 +1233,9 @@ static int __net_init sit_init_net(struct net *net)
1231 if ((err = register_netdev(sitn->fb_tunnel_dev))) 1233 if ((err = register_netdev(sitn->fb_tunnel_dev)))
1232 goto err_reg_dev; 1234 goto err_reg_dev;
1233 1235
1236 t = netdev_priv(sitn->fb_tunnel_dev);
1237
1238 strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
1234 return 0; 1239 return 0;
1235 1240
1236err_reg_dev: 1241err_reg_dev:
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 36131d122a6f..2dea4bb7b54a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1255,6 +1255,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1255 if (!want_cookie || tmp_opt.tstamp_ok) 1255 if (!want_cookie || tmp_opt.tstamp_ok)
1256 TCP_ECN_create_request(req, tcp_hdr(skb)); 1256 TCP_ECN_create_request(req, tcp_hdr(skb));
1257 1257
1258 treq->iif = sk->sk_bound_dev_if;
1259
1260 /* So that link locals have meaning */
1261 if (!sk->sk_bound_dev_if &&
1262 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1263 treq->iif = inet6_iif(skb);
1264
1258 if (!isn) { 1265 if (!isn) {
1259 struct inet_peer *peer = NULL; 1266 struct inet_peer *peer = NULL;
1260 1267
@@ -1264,12 +1271,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1264 atomic_inc(&skb->users); 1271 atomic_inc(&skb->users);
1265 treq->pktopts = skb; 1272 treq->pktopts = skb;
1266 } 1273 }
1267 treq->iif = sk->sk_bound_dev_if;
1268
1269 /* So that link locals have meaning */
1270 if (!sk->sk_bound_dev_if &&
1271 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1272 treq->iif = inet6_iif(skb);
1273 1274
1274 if (want_cookie) { 1275 if (want_cookie) {
1275 isn = cookie_v6_init_sequence(sk, skb, &req->mss); 1276 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 846f4757eb8d..8c2541915183 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -340,7 +340,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
340 struct ipv6_pinfo *np = inet6_sk(sk); 340 struct ipv6_pinfo *np = inet6_sk(sk);
341 struct inet_sock *inet = inet_sk(sk); 341 struct inet_sock *inet = inet_sk(sk);
342 struct sk_buff *skb; 342 struct sk_buff *skb;
343 unsigned int ulen; 343 unsigned int ulen, copied;
344 int peeked; 344 int peeked;
345 int err; 345 int err;
346 int is_udplite = IS_UDPLITE(sk); 346 int is_udplite = IS_UDPLITE(sk);
@@ -363,9 +363,10 @@ try_again:
363 goto out; 363 goto out;
364 364
365 ulen = skb->len - sizeof(struct udphdr); 365 ulen = skb->len - sizeof(struct udphdr);
366 if (len > ulen) 366 copied = len;
367 len = ulen; 367 if (copied > ulen)
368 else if (len < ulen) 368 copied = ulen;
369 else if (copied < ulen)
369 msg->msg_flags |= MSG_TRUNC; 370 msg->msg_flags |= MSG_TRUNC;
370 371
371 is_udp4 = (skb->protocol == htons(ETH_P_IP)); 372 is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -376,14 +377,14 @@ try_again:
376 * coverage checksum (UDP-Lite), do it before the copy. 377 * coverage checksum (UDP-Lite), do it before the copy.
377 */ 378 */
378 379
379 if (len < ulen || UDP_SKB_CB(skb)->partial_cov) { 380 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
380 if (udp_lib_checksum_complete(skb)) 381 if (udp_lib_checksum_complete(skb))
381 goto csum_copy_err; 382 goto csum_copy_err;
382 } 383 }
383 384
384 if (skb_csum_unnecessary(skb)) 385 if (skb_csum_unnecessary(skb))
385 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), 386 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
386 msg->msg_iov,len); 387 msg->msg_iov, copied );
387 else { 388 else {
388 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); 389 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
389 if (err == -EINVAL) 390 if (err == -EINVAL)
@@ -432,7 +433,7 @@ try_again:
432 datagram_recv_ctl(sk, msg, skb); 433 datagram_recv_ctl(sk, msg, skb);
433 } 434 }
434 435
435 err = len; 436 err = copied;
436 if (flags & MSG_TRUNC) 437 if (flags & MSG_TRUNC)
437 err = ulen; 438 err = ulen;
438 439
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index bf8d50c67931..89ff8c67943e 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -756,9 +756,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
756 goto error; 756 goto error;
757 } 757 }
758 758
759 /* Point to L2TP header */
760 optr = ptr = skb->data;
761
762 /* Trace packet contents, if enabled */ 759 /* Trace packet contents, if enabled */
763 if (tunnel->debug & L2TP_MSG_DATA) { 760 if (tunnel->debug & L2TP_MSG_DATA) {
764 length = min(32u, skb->len); 761 length = min(32u, skb->len);
@@ -769,12 +766,15 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
769 766
770 offset = 0; 767 offset = 0;
771 do { 768 do {
772 printk(" %02X", ptr[offset]); 769 printk(" %02X", skb->data[offset]);
773 } while (++offset < length); 770 } while (++offset < length);
774 771
775 printk("\n"); 772 printk("\n");
776 } 773 }
777 774
775 /* Point to L2TP header */
776 optr = ptr = skb->data;
777
778 /* Get L2TP header flags */ 778 /* Get L2TP header flags */
779 hdrflags = ntohs(*(__be16 *) ptr); 779 hdrflags = ntohs(*(__be16 *) ptr);
780 780
@@ -1072,7 +1072,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1072 1072
1073 /* Get routing info from the tunnel socket */ 1073 /* Get routing info from the tunnel socket */
1074 skb_dst_drop(skb); 1074 skb_dst_drop(skb);
1075 skb_dst_set(skb, dst_clone(__sk_dst_get(sk))); 1075 skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
1076 1076
1077 inet = inet_sk(sk); 1077 inet = inet_sk(sk);
1078 fl = &inet->cork.fl; 1078 fl = &inet->cork.fl;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b3f65520e7a7..2e4b961648d4 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -161,6 +161,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
161 return -ENOENT; 161 return -ENOENT;
162 } 162 }
163 163
164 /* if we're already stopping ignore any new requests to stop */
165 if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
166 spin_unlock_bh(&sta->lock);
167 return -EALREADY;
168 }
169
164 if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { 170 if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
165 /* not even started yet! */ 171 /* not even started yet! */
166 ieee80211_assign_tid_tx(sta, tid, NULL); 172 ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -169,6 +175,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
169 return 0; 175 return 0;
170 } 176 }
171 177
178 set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
179
172 spin_unlock_bh(&sta->lock); 180 spin_unlock_bh(&sta->lock);
173 181
174#ifdef CONFIG_MAC80211_HT_DEBUG 182#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -176,8 +184,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
176 sta->sta.addr, tid); 184 sta->sta.addr, tid);
177#endif /* CONFIG_MAC80211_HT_DEBUG */ 185#endif /* CONFIG_MAC80211_HT_DEBUG */
178 186
179 set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
180
181 del_timer_sync(&tid_tx->addba_resp_timer); 187 del_timer_sync(&tid_tx->addba_resp_timer);
182 188
183 /* 189 /*
@@ -187,6 +193,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
187 */ 193 */
188 clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); 194 clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
189 195
196 /*
197 * There might be a few packets being processed right now (on
198 * another CPU) that have already gotten past the aggregation
199 * check when it was still OPERATIONAL and consequently have
200 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
201 * call into the driver at the same time or even before the
202 * TX paths calls into it, which could confuse the driver.
203 *
204 * Wait for all currently running TX paths to finish before
205 * telling the driver. New packets will not go through since
206 * the aggregation session is no longer OPERATIONAL.
207 */
208 synchronize_net();
209
190 tid_tx->stop_initiator = initiator; 210 tid_tx->stop_initiator = initiator;
191 tid_tx->tx_stop = tx; 211 tid_tx->tx_stop = tx;
192 212
@@ -283,6 +303,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
283 __release(agg_queue); 303 __release(agg_queue);
284} 304}
285 305
306/*
307 * splice packets from the STA's pending to the local pending,
308 * requires a call to ieee80211_agg_splice_finish later
309 */
310static void __acquires(agg_queue)
311ieee80211_agg_splice_packets(struct ieee80211_local *local,
312 struct tid_ampdu_tx *tid_tx, u16 tid)
313{
314 int queue = ieee80211_ac_from_tid(tid);
315 unsigned long flags;
316
317 ieee80211_stop_queue_agg(local, tid);
318
319 if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
320 " from the pending queue\n", tid))
321 return;
322
323 if (!skb_queue_empty(&tid_tx->pending)) {
324 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
325 /* copy over remaining packets */
326 skb_queue_splice_tail_init(&tid_tx->pending,
327 &local->pending[queue]);
328 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
329 }
330}
331
332static void __releases(agg_queue)
333ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
334{
335 ieee80211_wake_queue_agg(local, tid);
336}
337
286void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) 338void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
287{ 339{
288 struct tid_ampdu_tx *tid_tx; 340 struct tid_ampdu_tx *tid_tx;
@@ -294,19 +346,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
294 tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 346 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
295 347
296 /* 348 /*
297 * While we're asking the driver about the aggregation, 349 * Start queuing up packets for this aggregation session.
298 * stop the AC queue so that we don't have to worry 350 * We're going to release them once the driver is OK with
299 * about frames that came in while we were doing that, 351 * that.
300 * which would require us to put them to the AC pending
301 * afterwards which just makes the code more complex.
302 */ 352 */
303 ieee80211_stop_queue_agg(local, tid);
304
305 clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); 353 clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
306 354
307 /* 355 /*
308 * make sure no packets are being processed to get 356 * Make sure no packets are being processed. This ensures that
309 * valid starting sequence number 357 * we have a valid starting sequence number and that in-flight
358 * packets have been flushed out and no packets for this TID
359 * will go into the driver during the ampdu_action call.
310 */ 360 */
311 synchronize_net(); 361 synchronize_net();
312 362
@@ -320,17 +370,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
320 " tid %d\n", tid); 370 " tid %d\n", tid);
321#endif 371#endif
322 spin_lock_bh(&sta->lock); 372 spin_lock_bh(&sta->lock);
373 ieee80211_agg_splice_packets(local, tid_tx, tid);
323 ieee80211_assign_tid_tx(sta, tid, NULL); 374 ieee80211_assign_tid_tx(sta, tid, NULL);
375 ieee80211_agg_splice_finish(local, tid);
324 spin_unlock_bh(&sta->lock); 376 spin_unlock_bh(&sta->lock);
325 377
326 ieee80211_wake_queue_agg(local, tid);
327 kfree_rcu(tid_tx, rcu_head); 378 kfree_rcu(tid_tx, rcu_head);
328 return; 379 return;
329 } 380 }
330 381
331 /* we can take packets again now */
332 ieee80211_wake_queue_agg(local, tid);
333
334 /* activate the timer for the recipient's addBA response */ 382 /* activate the timer for the recipient's addBA response */
335 mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); 383 mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
336#ifdef CONFIG_MAC80211_HT_DEBUG 384#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -446,38 +494,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
446} 494}
447EXPORT_SYMBOL(ieee80211_start_tx_ba_session); 495EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
448 496
449/*
450 * splice packets from the STA's pending to the local pending,
451 * requires a call to ieee80211_agg_splice_finish later
452 */
453static void __acquires(agg_queue)
454ieee80211_agg_splice_packets(struct ieee80211_local *local,
455 struct tid_ampdu_tx *tid_tx, u16 tid)
456{
457 int queue = ieee80211_ac_from_tid(tid);
458 unsigned long flags;
459
460 ieee80211_stop_queue_agg(local, tid);
461
462 if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
463 " from the pending queue\n", tid))
464 return;
465
466 if (!skb_queue_empty(&tid_tx->pending)) {
467 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
468 /* copy over remaining packets */
469 skb_queue_splice_tail_init(&tid_tx->pending,
470 &local->pending[queue]);
471 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
472 }
473}
474
475static void __releases(agg_queue)
476ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
477{
478 ieee80211_wake_queue_agg(local, tid);
479}
480
481static void ieee80211_agg_tx_operational(struct ieee80211_local *local, 497static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
482 struct sta_info *sta, u16 tid) 498 struct sta_info *sta, u16 tid)
483{ 499{
@@ -757,11 +773,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
757 goto out; 773 goto out;
758 } 774 }
759 775
760 del_timer(&tid_tx->addba_resp_timer); 776 del_timer_sync(&tid_tx->addba_resp_timer);
761 777
762#ifdef CONFIG_MAC80211_HT_DEBUG 778#ifdef CONFIG_MAC80211_HT_DEBUG
763 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); 779 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
764#endif 780#endif
781
782 /*
783 * addba_resp_timer may have fired before we got here, and
784 * caused WANT_STOP to be set. If the stop then was already
785 * processed further, STOPPING might be set.
786 */
787 if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
788 test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
789#ifdef CONFIG_MAC80211_HT_DEBUG
790 printk(KERN_DEBUG
791 "got addBA resp for tid %d but we already gave up\n",
792 tid);
793#endif
794 goto out;
795 }
796
765 /* 797 /*
766 * IEEE 802.11-2007 7.3.1.14: 798 * IEEE 802.11-2007 7.3.1.14:
767 * In an ADDBA Response frame, when the Status Code field 799 * In an ADDBA Response frame, when the Status Code field
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index c5f341798c16..3110cbdc501b 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -274,9 +274,9 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
274 274
275 PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack"); 275 PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
276 276
277 PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
278 "3839 bytes");
279 PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: " 277 PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
278 "3839 bytes");
279 PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
280 "7935 bytes"); 280 "7935 bytes");
281 281
282 /* 282 /*
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index d999bf3b84e1..cae443563ec9 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -757,6 +757,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
757 if (!local->int_scan_req) 757 if (!local->int_scan_req)
758 return -ENOMEM; 758 return -ENOMEM;
759 759
760 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
761 if (!local->hw.wiphy->bands[band])
762 continue;
763 local->int_scan_req->rates[band] = (u32) -1;
764 }
765
760 /* if low-level driver supports AP, we also support VLAN */ 766 /* if low-level driver supports AP, we also support VLAN */
761 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { 767 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
762 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); 768 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 72c8bea81a6c..b1b1bb368f70 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1487,6 +1487,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1487 int i, j, err; 1487 int i, j, err;
1488 bool have_higher_than_11mbit = false; 1488 bool have_higher_than_11mbit = false;
1489 u16 ap_ht_cap_flags; 1489 u16 ap_ht_cap_flags;
1490 int min_rate = INT_MAX, min_rate_index = -1;
1490 1491
1491 /* AssocResp and ReassocResp have identical structure */ 1492 /* AssocResp and ReassocResp have identical structure */
1492 1493
@@ -1553,6 +1554,10 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1553 rates |= BIT(j); 1554 rates |= BIT(j);
1554 if (is_basic) 1555 if (is_basic)
1555 basic_rates |= BIT(j); 1556 basic_rates |= BIT(j);
1557 if (rate < min_rate) {
1558 min_rate = rate;
1559 min_rate_index = j;
1560 }
1556 break; 1561 break;
1557 } 1562 }
1558 } 1563 }
@@ -1570,11 +1575,25 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1570 rates |= BIT(j); 1575 rates |= BIT(j);
1571 if (is_basic) 1576 if (is_basic)
1572 basic_rates |= BIT(j); 1577 basic_rates |= BIT(j);
1578 if (rate < min_rate) {
1579 min_rate = rate;
1580 min_rate_index = j;
1581 }
1573 break; 1582 break;
1574 } 1583 }
1575 } 1584 }
1576 } 1585 }
1577 1586
1587 /*
1588 * some buggy APs don't advertise basic_rates. use the lowest
1589 * supported rate instead.
1590 */
1591 if (unlikely(!basic_rates) && min_rate_index >= 0) {
1592 printk(KERN_DEBUG "%s: No basic rates in AssocResp. "
1593 "Using min supported rate instead.\n", sdata->name);
1594 basic_rates = BIT(min_rate_index);
1595 }
1596
1578 sta->sta.supp_rates[wk->chan->band] = rates; 1597 sta->sta.supp_rates[wk->chan->band] = rates;
1579 sdata->vif.bss_conf.basic_rates = basic_rates; 1598 sdata->vif.bss_conf.basic_rates = basic_rates;
1580 1599
@@ -2269,6 +2288,7 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
2269 2288
2270 cancel_work_sync(&ifmgd->request_smps_work); 2289 cancel_work_sync(&ifmgd->request_smps_work);
2271 2290
2291 cancel_work_sync(&ifmgd->monitor_work);
2272 cancel_work_sync(&ifmgd->beacon_connection_loss_work); 2292 cancel_work_sync(&ifmgd->beacon_connection_loss_work);
2273 if (del_timer_sync(&ifmgd->timer)) 2293 if (del_timer_sync(&ifmgd->timer))
2274 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); 2294 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
@@ -2277,7 +2297,6 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
2277 if (del_timer_sync(&ifmgd->chswitch_timer)) 2297 if (del_timer_sync(&ifmgd->chswitch_timer))
2278 set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); 2298 set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
2279 2299
2280 cancel_work_sync(&ifmgd->monitor_work);
2281 /* these will just be re-established on connection */ 2300 /* these will just be re-established on connection */
2282 del_timer_sync(&ifmgd->conn_mon_timer); 2301 del_timer_sync(&ifmgd->conn_mon_timer);
2283 del_timer_sync(&ifmgd->bcn_mon_timer); 2302 del_timer_sync(&ifmgd->bcn_mon_timer);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bb53726cb04a..fb123e2e081a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -141,8 +141,9 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
141 pos++; 141 pos++;
142 142
143 /* IEEE80211_RADIOTAP_RATE */ 143 /* IEEE80211_RADIOTAP_RATE */
144 if (status->flag & RX_FLAG_HT) { 144 if (!rate || status->flag & RX_FLAG_HT) {
145 /* 145 /*
146 * Without rate information don't add it. If we have,
146 * MCS information is a separate field in radiotap, 147 * MCS information is a separate field in radiotap,
147 * added below. The byte here is needed as padding 148 * added below. The byte here is needed as padding
148 * for the channel though, so initialise it to 0. 149 * for the channel though, so initialise it to 0.
@@ -163,12 +164,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
163 else if (status->flag & RX_FLAG_HT) 164 else if (status->flag & RX_FLAG_HT)
164 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ, 165 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
165 pos); 166 pos);
166 else if (rate->flags & IEEE80211_RATE_ERP_G) 167 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
167 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ, 168 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
168 pos); 169 pos);
169 else 170 else if (rate)
170 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ, 171 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
171 pos); 172 pos);
173 else
174 put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos);
172 pos += 2; 175 pos += 2;
173 176
174 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 177 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index ce962d2c8782..8eaa746ec7a2 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1354,12 +1354,12 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1354 * Use MoreData flag to indicate whether there are 1354 * Use MoreData flag to indicate whether there are
1355 * more buffered frames for this STA 1355 * more buffered frames for this STA
1356 */ 1356 */
1357 if (!more_data) 1357 if (more_data || !skb_queue_empty(&frames))
1358 hdr->frame_control &=
1359 cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1360 else
1361 hdr->frame_control |= 1358 hdr->frame_control |=
1362 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1359 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1360 else
1361 hdr->frame_control &=
1362 cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1363 1363
1364 if (ieee80211_is_data_qos(hdr->frame_control) || 1364 if (ieee80211_is_data_qos(hdr->frame_control) ||
1365 ieee80211_is_qos_nullfunc(hdr->frame_control)) 1365 ieee80211_is_qos_nullfunc(hdr->frame_control))
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 80de436eae20..16518f386117 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -260,7 +260,7 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
260 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 260 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
261 struct ieee80211_radiotap_header *rthdr; 261 struct ieee80211_radiotap_header *rthdr;
262 unsigned char *pos; 262 unsigned char *pos;
263 __le16 txflags; 263 u16 txflags;
264 264
265 rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len); 265 rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);
266 266
@@ -290,13 +290,13 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
290 txflags = 0; 290 txflags = 0;
291 if (!(info->flags & IEEE80211_TX_STAT_ACK) && 291 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
292 !is_multicast_ether_addr(hdr->addr1)) 292 !is_multicast_ether_addr(hdr->addr1))
293 txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL); 293 txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
294 294
295 if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || 295 if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
296 (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) 296 (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
297 txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS); 297 txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
298 else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) 298 else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
299 txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS); 299 txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
300 300
301 put_unaligned_le16(txflags, pos); 301 put_unaligned_le16(txflags, pos);
302 pos += 2; 302 pos += 2;
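
The status.c hunk above keeps the radiotap TX-flags accumulator in host byte order and leaves the single little-endian conversion to put_unaligned_le16(); the previous code converted each flag with cpu_to_le16() and then converted again on store, which mangles the field on big-endian hosts. A hedged sketch of the convert-once-at-the-boundary pattern follows (store_le16() is a simplified stand-in for the kernel helper, and the flag values are illustrative).

#include <stdint.h>
#include <stdio.h>

#define F_TX_FAIL 0x0001
#define F_TX_CTS  0x0002

/* Simplified stand-in for put_unaligned_le16(): always emits the two
 * bytes in little-endian order, regardless of host endianness. */
static void store_le16(uint16_t val, uint8_t *p)
{
        p[0] = val & 0xff;
        p[1] = val >> 8;
}

int main(void)
{
        uint16_t txflags = 0;           /* host-order accumulator, as in the patch */
        uint8_t buf[2];

        txflags |= F_TX_FAIL;
        txflags |= F_TX_CTS;

        store_le16(txflags, buf);       /* single conversion at the boundary */
        printf("%02x %02x\n", buf[0], buf[1]);  /* 03 00 on any host */
        return 0;
}
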
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 51e256c5fb78..d5230ecc784d 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -881,6 +881,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
881 skb = ieee80211_probereq_get(&local->hw, &sdata->vif, 881 skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
882 ssid, ssid_len, 882 ssid, ssid_len,
883 buf, buf_len); 883 buf, buf_len);
884 if (!skb)
885 goto out;
884 886
885 if (dst) { 887 if (dst) {
886 mgmt = (struct ieee80211_mgmt *) skb->data; 888 mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -889,6 +891,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
889 } 891 }
890 892
891 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 893 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
894
895 out:
892 kfree(buf); 896 kfree(buf);
893 897
894 return skb; 898 return skb;
@@ -1035,7 +1039,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1035 struct ieee80211_sub_if_data, 1039 struct ieee80211_sub_if_data,
1036 u.ap); 1040 u.ap);
1037 1041
1038 memset(&sta->sta.drv_priv, 0, hw->sta_data_size);
1039 WARN_ON(drv_sta_add(local, sdata, &sta->sta)); 1042 WARN_ON(drv_sta_add(local, sdata, &sta->sta));
1040 } 1043 }
1041 } 1044 }
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 8260b13d93c9..d5597b759ba3 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -201,7 +201,6 @@ config NF_CONNTRACK_BROADCAST
201 201
202config NF_CONNTRACK_NETBIOS_NS 202config NF_CONNTRACK_NETBIOS_NS
203 tristate "NetBIOS name service protocol support" 203 tristate "NetBIOS name service protocol support"
204 depends on NETFILTER_ADVANCED
205 select NF_CONNTRACK_BROADCAST 204 select NF_CONNTRACK_BROADCAST
206 help 205 help
207 NetBIOS name service requests are sent as broadcast messages from an 206 NetBIOS name service requests are sent as broadcast messages from an
@@ -542,7 +541,6 @@ config NETFILTER_XT_TARGET_NOTRACK
542 tristate '"NOTRACK" target support' 541 tristate '"NOTRACK" target support'
543 depends on IP_NF_RAW || IP6_NF_RAW 542 depends on IP_NF_RAW || IP6_NF_RAW
544 depends on NF_CONNTRACK 543 depends on NF_CONNTRACK
545 depends on NETFILTER_ADVANCED
546 help 544 help
547 The NOTRACK target allows a select rule to specify 545 The NOTRACK target allows a select rule to specify
548 which packets *not* to enter the conntrack/NAT 546 which packets *not* to enter the conntrack/NAT
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 6ee10f5d59bd..37d667e3f6f8 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -158,7 +158,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
158 const struct ip_set_hash *h = set->data; 158 const struct ip_set_hash *h = set->data;
159 ipset_adtfn adtfn = set->variant->adt[adt]; 159 ipset_adtfn adtfn = set->variant->adt[adt];
160 struct hash_ipport4_elem data = { }; 160 struct hash_ipport4_elem data = { };
161 u32 ip, ip_to, p = 0, port, port_to; 161 u32 ip, ip_to = 0, p = 0, port, port_to;
162 u32 timeout = h->timeout; 162 u32 timeout = h->timeout;
163 bool with_ports = false; 163 bool with_ports = false;
164 int ret; 164 int ret;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index fb90e344e907..e69e2718fbe1 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -162,7 +162,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
162 const struct ip_set_hash *h = set->data; 162 const struct ip_set_hash *h = set->data;
163 ipset_adtfn adtfn = set->variant->adt[adt]; 163 ipset_adtfn adtfn = set->variant->adt[adt];
164 struct hash_ipportip4_elem data = { }; 164 struct hash_ipportip4_elem data = { };
165 u32 ip, ip_to, p = 0, port, port_to; 165 u32 ip, ip_to = 0, p = 0, port, port_to;
166 u32 timeout = h->timeout; 166 u32 timeout = h->timeout;
167 bool with_ports = false; 167 bool with_ports = false;
168 int ret; 168 int ret;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index deb3e3dfa5fc..64199b4e93c9 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -184,7 +184,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
184 const struct ip_set_hash *h = set->data; 184 const struct ip_set_hash *h = set->data;
185 ipset_adtfn adtfn = set->variant->adt[adt]; 185 ipset_adtfn adtfn = set->variant->adt[adt];
186 struct hash_ipportnet4_elem data = { .cidr = HOST_MASK }; 186 struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
187 u32 ip, ip_to, p = 0, port, port_to; 187 u32 ip, ip_to = 0, p = 0, port, port_to;
188 u32 ip2_from = 0, ip2_to, ip2_last, ip2; 188 u32 ip2_from = 0, ip2_to, ip2_last, ip2;
189 u32 timeout = h->timeout; 189 u32 timeout = h->timeout;
190 bool with_ports = false; 190 bool with_ports = false;
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 6b368be937c6..b62c4148b921 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -27,22 +27,17 @@
27 27
28static DEFINE_MUTEX(nf_ct_ecache_mutex); 28static DEFINE_MUTEX(nf_ct_ecache_mutex);
29 29
30struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
31EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
32
33struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
34EXPORT_SYMBOL_GPL(nf_expect_event_cb);
35
36/* deliver cached events and clear cache entry - must be called with locally 30/* deliver cached events and clear cache entry - must be called with locally
37 * disabled softirqs */ 31 * disabled softirqs */
38void nf_ct_deliver_cached_events(struct nf_conn *ct) 32void nf_ct_deliver_cached_events(struct nf_conn *ct)
39{ 33{
34 struct net *net = nf_ct_net(ct);
40 unsigned long events; 35 unsigned long events;
41 struct nf_ct_event_notifier *notify; 36 struct nf_ct_event_notifier *notify;
42 struct nf_conntrack_ecache *e; 37 struct nf_conntrack_ecache *e;
43 38
44 rcu_read_lock(); 39 rcu_read_lock();
45 notify = rcu_dereference(nf_conntrack_event_cb); 40 notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
46 if (notify == NULL) 41 if (notify == NULL)
47 goto out_unlock; 42 goto out_unlock;
48 43
@@ -83,19 +78,20 @@ out_unlock:
83} 78}
84EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events); 79EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
85 80
86int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new) 81int nf_conntrack_register_notifier(struct net *net,
82 struct nf_ct_event_notifier *new)
87{ 83{
88 int ret = 0; 84 int ret = 0;
89 struct nf_ct_event_notifier *notify; 85 struct nf_ct_event_notifier *notify;
90 86
91 mutex_lock(&nf_ct_ecache_mutex); 87 mutex_lock(&nf_ct_ecache_mutex);
92 notify = rcu_dereference_protected(nf_conntrack_event_cb, 88 notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
93 lockdep_is_held(&nf_ct_ecache_mutex)); 89 lockdep_is_held(&nf_ct_ecache_mutex));
94 if (notify != NULL) { 90 if (notify != NULL) {
95 ret = -EBUSY; 91 ret = -EBUSY;
96 goto out_unlock; 92 goto out_unlock;
97 } 93 }
98 RCU_INIT_POINTER(nf_conntrack_event_cb, new); 94 RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
99 mutex_unlock(&nf_ct_ecache_mutex); 95 mutex_unlock(&nf_ct_ecache_mutex);
100 return ret; 96 return ret;
101 97
@@ -105,32 +101,34 @@ out_unlock:
105} 101}
106EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier); 102EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
107 103
108void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new) 104void nf_conntrack_unregister_notifier(struct net *net,
105 struct nf_ct_event_notifier *new)
109{ 106{
110 struct nf_ct_event_notifier *notify; 107 struct nf_ct_event_notifier *notify;
111 108
112 mutex_lock(&nf_ct_ecache_mutex); 109 mutex_lock(&nf_ct_ecache_mutex);
113 notify = rcu_dereference_protected(nf_conntrack_event_cb, 110 notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
114 lockdep_is_held(&nf_ct_ecache_mutex)); 111 lockdep_is_held(&nf_ct_ecache_mutex));
115 BUG_ON(notify != new); 112 BUG_ON(notify != new);
116 RCU_INIT_POINTER(nf_conntrack_event_cb, NULL); 113 RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
117 mutex_unlock(&nf_ct_ecache_mutex); 114 mutex_unlock(&nf_ct_ecache_mutex);
118} 115}
119EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); 116EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
120 117
121int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new) 118int nf_ct_expect_register_notifier(struct net *net,
119 struct nf_exp_event_notifier *new)
122{ 120{
123 int ret = 0; 121 int ret = 0;
124 struct nf_exp_event_notifier *notify; 122 struct nf_exp_event_notifier *notify;
125 123
126 mutex_lock(&nf_ct_ecache_mutex); 124 mutex_lock(&nf_ct_ecache_mutex);
127 notify = rcu_dereference_protected(nf_expect_event_cb, 125 notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
128 lockdep_is_held(&nf_ct_ecache_mutex)); 126 lockdep_is_held(&nf_ct_ecache_mutex));
129 if (notify != NULL) { 127 if (notify != NULL) {
130 ret = -EBUSY; 128 ret = -EBUSY;
131 goto out_unlock; 129 goto out_unlock;
132 } 130 }
133 RCU_INIT_POINTER(nf_expect_event_cb, new); 131 RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
134 mutex_unlock(&nf_ct_ecache_mutex); 132 mutex_unlock(&nf_ct_ecache_mutex);
135 return ret; 133 return ret;
136 134
@@ -140,15 +138,16 @@ out_unlock:
140} 138}
141EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier); 139EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
142 140
143void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new) 141void nf_ct_expect_unregister_notifier(struct net *net,
142 struct nf_exp_event_notifier *new)
144{ 143{
145 struct nf_exp_event_notifier *notify; 144 struct nf_exp_event_notifier *notify;
146 145
147 mutex_lock(&nf_ct_ecache_mutex); 146 mutex_lock(&nf_ct_ecache_mutex);
148 notify = rcu_dereference_protected(nf_expect_event_cb, 147 notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
149 lockdep_is_held(&nf_ct_ecache_mutex)); 148 lockdep_is_held(&nf_ct_ecache_mutex));
150 BUG_ON(notify != new); 149 BUG_ON(notify != new);
151 RCU_INIT_POINTER(nf_expect_event_cb, NULL); 150 RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
152 mutex_unlock(&nf_ct_ecache_mutex); 151 mutex_unlock(&nf_ct_ecache_mutex);
153} 152}
154EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); 153EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index e58aa9b1fe8a..ef21b221f036 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -4,7 +4,7 @@
4 * (C) 2001 by Jay Schulist <jschlst@samba.org> 4 * (C) 2001 by Jay Schulist <jschlst@samba.org>
5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> 5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2003 by Patrick Mchardy <kaber@trash.net> 6 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
7 * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org> 7 * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
8 * 8 *
9 * Initial connection tracking via netlink development funded and 9 * Initial connection tracking via netlink development funded and
10 * generally made possible by Network Robots, Inc. (www.networkrobots.com) 10 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -2163,6 +2163,54 @@ MODULE_ALIAS("ip_conntrack_netlink");
2163MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); 2163MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
2164MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); 2164MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
2165 2165
2166static int __net_init ctnetlink_net_init(struct net *net)
2167{
2168#ifdef CONFIG_NF_CONNTRACK_EVENTS
2169 int ret;
2170
2171 ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
2172 if (ret < 0) {
2173 pr_err("ctnetlink_init: cannot register notifier.\n");
2174 goto err_out;
2175 }
2176
2177 ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
2178 if (ret < 0) {
2179 pr_err("ctnetlink_init: cannot expect register notifier.\n");
2180 goto err_unreg_notifier;
2181 }
2182#endif
2183 return 0;
2184
2185#ifdef CONFIG_NF_CONNTRACK_EVENTS
2186err_unreg_notifier:
2187 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
2188err_out:
2189 return ret;
2190#endif
2191}
2192
2193static void ctnetlink_net_exit(struct net *net)
2194{
2195#ifdef CONFIG_NF_CONNTRACK_EVENTS
2196 nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
2197 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
2198#endif
2199}
2200
2201static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
2202{
2203 struct net *net;
2204
2205 list_for_each_entry(net, net_exit_list, exit_list)
2206 ctnetlink_net_exit(net);
2207}
2208
2209static struct pernet_operations ctnetlink_net_ops = {
2210 .init = ctnetlink_net_init,
2211 .exit_batch = ctnetlink_net_exit_batch,
2212};
2213
2166static int __init ctnetlink_init(void) 2214static int __init ctnetlink_init(void)
2167{ 2215{
2168 int ret; 2216 int ret;
@@ -2180,28 +2228,15 @@ static int __init ctnetlink_init(void)
2180 goto err_unreg_subsys; 2228 goto err_unreg_subsys;
2181 } 2229 }
2182 2230
2183#ifdef CONFIG_NF_CONNTRACK_EVENTS 2231 if (register_pernet_subsys(&ctnetlink_net_ops)) {
2184 ret = nf_conntrack_register_notifier(&ctnl_notifier); 2232 pr_err("ctnetlink_init: cannot register pernet operations\n");
2185 if (ret < 0) {
2186 pr_err("ctnetlink_init: cannot register notifier.\n");
2187 goto err_unreg_exp_subsys; 2233 goto err_unreg_exp_subsys;
2188 } 2234 }
2189 2235
2190 ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
2191 if (ret < 0) {
2192 pr_err("ctnetlink_init: cannot expect register notifier.\n");
2193 goto err_unreg_notifier;
2194 }
2195#endif
2196
2197 return 0; 2236 return 0;
2198 2237
2199#ifdef CONFIG_NF_CONNTRACK_EVENTS
2200err_unreg_notifier:
2201 nf_conntrack_unregister_notifier(&ctnl_notifier);
2202err_unreg_exp_subsys: 2238err_unreg_exp_subsys:
2203 nfnetlink_subsys_unregister(&ctnl_exp_subsys); 2239 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
2204#endif
2205err_unreg_subsys: 2240err_unreg_subsys:
2206 nfnetlink_subsys_unregister(&ctnl_subsys); 2241 nfnetlink_subsys_unregister(&ctnl_subsys);
2207err_out: 2242err_out:
@@ -2213,11 +2248,7 @@ static void __exit ctnetlink_exit(void)
2213 pr_info("ctnetlink: unregistering from nfnetlink.\n"); 2248 pr_info("ctnetlink: unregistering from nfnetlink.\n");
2214 2249
2215 nf_ct_remove_userspace_expectations(); 2250 nf_ct_remove_userspace_expectations();
2216#ifdef CONFIG_NF_CONNTRACK_EVENTS 2251 unregister_pernet_subsys(&ctnetlink_net_ops);
2217 nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
2218 nf_conntrack_unregister_notifier(&ctnl_notifier);
2219#endif
2220
2221 nfnetlink_subsys_unregister(&ctnl_exp_subsys); 2252 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
2222 nfnetlink_subsys_unregister(&ctnl_subsys); 2253 nfnetlink_subsys_unregister(&ctnl_subsys);
2223} 2254}
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 9c24de10a657..824f184f7a9b 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -111,8 +111,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
111 struct netlbl_domaddr_map *addrmap = NULL; 111 struct netlbl_domaddr_map *addrmap = NULL;
112 struct netlbl_domaddr4_map *map4 = NULL; 112 struct netlbl_domaddr4_map *map4 = NULL;
113 struct netlbl_domaddr6_map *map6 = NULL; 113 struct netlbl_domaddr6_map *map6 = NULL;
114 const struct in_addr *addr4, *mask4;
115 const struct in6_addr *addr6, *mask6;
116 114
117 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 115 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
118 if (entry == NULL) 116 if (entry == NULL)
@@ -133,9 +131,9 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
133 INIT_LIST_HEAD(&addrmap->list6); 131 INIT_LIST_HEAD(&addrmap->list6);
134 132
135 switch (family) { 133 switch (family) {
136 case AF_INET: 134 case AF_INET: {
137 addr4 = addr; 135 const struct in_addr *addr4 = addr;
138 mask4 = mask; 136 const struct in_addr *mask4 = mask;
139 map4 = kzalloc(sizeof(*map4), GFP_ATOMIC); 137 map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
140 if (map4 == NULL) 138 if (map4 == NULL)
141 goto cfg_unlbl_map_add_failure; 139 goto cfg_unlbl_map_add_failure;
@@ -148,9 +146,11 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
148 if (ret_val != 0) 146 if (ret_val != 0)
149 goto cfg_unlbl_map_add_failure; 147 goto cfg_unlbl_map_add_failure;
150 break; 148 break;
151 case AF_INET6: 149 }
152 addr6 = addr; 150#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
153 mask6 = mask; 151 case AF_INET6: {
152 const struct in6_addr *addr6 = addr;
153 const struct in6_addr *mask6 = mask;
154 map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); 154 map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
155 if (map6 == NULL) 155 if (map6 == NULL)
156 goto cfg_unlbl_map_add_failure; 156 goto cfg_unlbl_map_add_failure;
@@ -162,11 +162,13 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
162 map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3]; 162 map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
163 ipv6_addr_copy(&map6->list.mask, mask6); 163 ipv6_addr_copy(&map6->list.mask, mask6);
164 map6->list.valid = 1; 164 map6->list.valid = 1;
165 ret_val = netlbl_af4list_add(&map4->list, 165 ret_val = netlbl_af6list_add(&map6->list,
166 &addrmap->list4); 166 &addrmap->list6);
167 if (ret_val != 0) 167 if (ret_val != 0)
168 goto cfg_unlbl_map_add_failure; 168 goto cfg_unlbl_map_add_failure;
169 break; 169 break;
170 }
171#endif /* IPv6 */
170 default: 172 default:
171 goto cfg_unlbl_map_add_failure; 173 goto cfg_unlbl_map_add_failure;
172 break; 174 break;
@@ -225,9 +227,11 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
225 case AF_INET: 227 case AF_INET:
226 addr_len = sizeof(struct in_addr); 228 addr_len = sizeof(struct in_addr);
227 break; 229 break;
230#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
228 case AF_INET6: 231 case AF_INET6:
229 addr_len = sizeof(struct in6_addr); 232 addr_len = sizeof(struct in6_addr);
230 break; 233 break;
234#endif /* IPv6 */
231 default: 235 default:
232 return -EPFNOSUPPORT; 236 return -EPFNOSUPPORT;
233 } 237 }
@@ -266,9 +270,11 @@ int netlbl_cfg_unlbl_static_del(struct net *net,
266 case AF_INET: 270 case AF_INET:
267 addr_len = sizeof(struct in_addr); 271 addr_len = sizeof(struct in_addr);
268 break; 272 break;
273#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
269 case AF_INET6: 274 case AF_INET6:
270 addr_len = sizeof(struct in6_addr); 275 addr_len = sizeof(struct in6_addr);
271 break; 276 break;
277#endif /* IPv6 */
272 default: 278 default:
273 return -EPFNOSUPPORT; 279 return -EPFNOSUPPORT;
274 } 280 }
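
The netlabel hunks above move the per-family pointer declarations into braced case blocks and compile the AF_INET6 branches only when IPv6 is built in or modular, which avoids unused-variable warnings in IPv6-less builds. A compressed sketch of that shape, with the surrounding error handling elided and names reused from the diff purely for illustration:

    switch (family) {
    case AF_INET: {
        const struct in_addr *addr4 = addr;   /* scoped to this case */
        /* ... build and add the IPv4 mapping ... */
        break;
    }
    #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
    case AF_INET6: {
        const struct in6_addr *addr6 = addr;
        /* ... build and add the IPv6 mapping ... */
        break;
    }
    #endif /* IPv6 */
    default:
        return -EPFNOSUPPORT;
    }
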
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index 4cf6dc7910e4..ec753b3ae72a 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -9,7 +9,6 @@ config RDS
9 9
10config RDS_RDMA 10config RDS_RDMA
11 tristate "RDS over Infiniband and iWARP" 11 tristate "RDS over Infiniband and iWARP"
12 select LLIST
13 depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS 12 depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
14 ---help--- 13 ---help---
15 Allow RDS to use Infiniband and iWARP as a transport. 14 Allow RDS to use Infiniband and iWARP as a transport.
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index b9493a09a870..6cd8ddfb512d 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -385,7 +385,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
385 struct gred_sched_data *q; 385 struct gred_sched_data *q;
386 386
387 if (table->tab[dp] == NULL) { 387 if (table->tab[dp] == NULL) {
388 table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL); 388 table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC);
389 if (table->tab[dp] == NULL) 389 if (table->tab[dp] == NULL)
390 return -ENOMEM; 390 return -ENOMEM;
391 } 391 }
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 6649463da1b6..d617161f8dd3 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -209,8 +209,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
209 ctl->Plog, ctl->Scell_log, 209 ctl->Plog, ctl->Scell_log,
210 nla_data(tb[TCA_RED_STAB])); 210 nla_data(tb[TCA_RED_STAB]));
211 211
212 if (skb_queue_empty(&sch->q)) 212 if (!q->qdisc->q.qlen)
213 red_end_of_idle_period(&q->parms); 213 red_start_of_idle_period(&q->parms);
214 214
215 sch_tree_unlock(sch); 215 sch_tree_unlock(sch);
216 return 0; 216 return 0;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index a3b7120fcc74..4f4c52c0eeb3 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
225 225
226 226
227static int 227static int
228__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) 228__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
229 struct net_device *dev, struct netdev_queue *txq,
230 struct neighbour *mn)
229{ 231{
230 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); 232 struct teql_sched_data *q = qdisc_priv(txq->qdisc);
231 struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
232 struct neighbour *mn = dst_get_neighbour(skb_dst(skb));
233 struct neighbour *n = q->ncache; 233 struct neighbour *n = q->ncache;
234 234
235 if (mn->tbl == NULL) 235 if (mn->tbl == NULL)
@@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
262} 262}
263 263
264static inline int teql_resolve(struct sk_buff *skb, 264static inline int teql_resolve(struct sk_buff *skb,
265 struct sk_buff *skb_res, struct net_device *dev) 265 struct sk_buff *skb_res,
266 struct net_device *dev,
267 struct netdev_queue *txq)
266{ 268{
267 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 269 struct dst_entry *dst = skb_dst(skb);
270 struct neighbour *mn;
271 int res;
272
268 if (txq->qdisc == &noop_qdisc) 273 if (txq->qdisc == &noop_qdisc)
269 return -ENODEV; 274 return -ENODEV;
270 275
271 if (dev->header_ops == NULL || 276 if (!dev->header_ops || !dst)
272 skb_dst(skb) == NULL ||
273 dst_get_neighbour(skb_dst(skb)) == NULL)
274 return 0; 277 return 0;
275 return __teql_resolve(skb, skb_res, dev); 278
279 rcu_read_lock();
280 mn = dst_get_neighbour(dst);
281 res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
282 rcu_read_unlock();
283
284 return res;
276} 285}
277 286
278static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) 287static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -307,7 +316,7 @@ restart:
307 continue; 316 continue;
308 } 317 }
309 318
310 switch (teql_resolve(skb, skb_res, slave)) { 319 switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
311 case 0: 320 case 0:
312 if (__netif_tx_trylock(slave_txq)) { 321 if (__netif_tx_trylock(slave_txq)) {
313 unsigned int length = qdisc_pkt_len(skb); 322 unsigned int length = qdisc_pkt_len(skb);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 865e68fef21c..bf812048cf6f 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
82 struct sctp_auth_bytes *key; 82 struct sctp_auth_bytes *key;
83 83
84 /* Verify that we are not going to overflow INT_MAX */ 84 /* Verify that we are not going to overflow INT_MAX */
85 if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes)) 85 if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
86 return NULL; 86 return NULL;
87 87
88 /* Allocate the shared key */ 88 /* Allocate the shared key */
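
The sctp_auth_create_key() hunk rewrites the overflow guard so the subtraction happens between compile-time constants instead of involving the caller-supplied key_len: the old form wraps once key_len exceeds INT_MAX and then fails to trigger. A small userspace demonstration, assuming an 8-byte header size purely for illustration:

    #include <assert.h>
    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HDR_SIZE ((size_t)8)    /* stand-in for sizeof(struct sctp_auth_bytes) */

    int main(void)
    {
        uint32_t key_len = UINT32_MAX;    /* hostile length from the wire */

        /* Old check: INT_MAX is converted to unsigned, the subtraction wraps,
         * and the guard silently passes the bogus length through. */
        int old_guard_fires = (INT_MAX - key_len) < HDR_SIZE;

        /* New check: INT_MAX - HDR_SIZE is a plain constant, so any key_len
         * above it is rejected before the allocation size can overflow. */
        int new_guard_fires = key_len > (INT_MAX - HDR_SIZE);

        printf("old guard fires: %d, new guard fires: %d\n",
               old_guard_fires, new_guard_fires);
        assert(!old_guard_fires && new_guard_fires);
        return 0;
    }
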
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index d7f97ef26590..55472c48825e 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -496,7 +496,7 @@ static int xs_nospace(struct rpc_task *task)
496 struct rpc_rqst *req = task->tk_rqstp; 496 struct rpc_rqst *req = task->tk_rqstp;
497 struct rpc_xprt *xprt = req->rq_xprt; 497 struct rpc_xprt *xprt = req->rq_xprt;
498 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 498 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
499 int ret = 0; 499 int ret = -EAGAIN;
500 500
501 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", 501 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
502 task->tk_pid, req->rq_slen - req->rq_bytes_sent, 502 task->tk_pid, req->rq_slen - req->rq_bytes_sent,
@@ -508,7 +508,6 @@ static int xs_nospace(struct rpc_task *task)
508 /* Don't race with disconnect */ 508 /* Don't race with disconnect */
509 if (xprt_connected(xprt)) { 509 if (xprt_connected(xprt)) {
510 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { 510 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
511 ret = -EAGAIN;
512 /* 511 /*
513 * Notify TCP that we're limited by the application 512 * Notify TCP that we're limited by the application
514 * window size 513 * window size
@@ -2530,8 +2529,10 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2530 int err; 2529 int err;
2531 err = xs_init_anyaddr(args->dstaddr->sa_family, 2530 err = xs_init_anyaddr(args->dstaddr->sa_family,
2532 (struct sockaddr *)&new->srcaddr); 2531 (struct sockaddr *)&new->srcaddr);
2533 if (err != 0) 2532 if (err != 0) {
2533 xprt_free(xprt);
2534 return ERR_PTR(err); 2534 return ERR_PTR(err);
2535 }
2535 } 2536 }
2536 2537
2537 return xprt; 2538 return xprt;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 466fbcc5cf77..b595a3d8679f 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1957,6 +1957,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1957 if ((UNIXCB(skb).pid != siocb->scm->pid) || 1957 if ((UNIXCB(skb).pid != siocb->scm->pid) ||
1958 (UNIXCB(skb).cred != siocb->scm->cred)) { 1958 (UNIXCB(skb).cred != siocb->scm->cred)) {
1959 skb_queue_head(&sk->sk_receive_queue, skb); 1959 skb_queue_head(&sk->sk_receive_queue, skb);
1960 sk->sk_data_ready(sk, skb->len);
1960 break; 1961 break;
1961 } 1962 }
1962 } else { 1963 } else {
@@ -1974,6 +1975,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1974 chunk = min_t(unsigned int, skb->len, size); 1975 chunk = min_t(unsigned int, skb->len, size);
1975 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { 1976 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1976 skb_queue_head(&sk->sk_receive_queue, skb); 1977 skb_queue_head(&sk->sk_receive_queue, skb);
1978 sk->sk_data_ready(sk, skb->len);
1977 if (copied == 0) 1979 if (copied == 0)
1978 copied = -EFAULT; 1980 copied = -EFAULT;
1979 break; 1981 break;
@@ -1991,6 +1993,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1991 /* put the skb back if we didn't use it up.. */ 1993 /* put the skb back if we didn't use it up.. */
1992 if (skb->len) { 1994 if (skb->len) {
1993 skb_queue_head(&sk->sk_receive_queue, skb); 1995 skb_queue_head(&sk->sk_receive_queue, skb);
1996 sk->sk_data_ready(sk, skb->len);
1994 break; 1997 break;
1995 } 1998 }
1996 1999
@@ -2006,6 +2009,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
2006 2009
2007 /* put message back and return */ 2010 /* put message back and return */
2008 skb_queue_head(&sk->sk_receive_queue, skb); 2011 skb_queue_head(&sk->sk_receive_queue, skb);
2012 sk->sk_data_ready(sk, skb->len);
2009 break; 2013 break;
2010 } 2014 }
2011 } while (size); 2015 } while (size);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 48260c2d092a..ffafda5022c2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -89,8 +89,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
89 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, 89 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
90 [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, 90 [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
91 91
92 [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN }, 92 [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
93 [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN }, 93 [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
94 94
95 [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, 95 [NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
96 [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, 96 [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
@@ -132,8 +132,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
132 [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED }, 132 [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED },
133 [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG }, 133 [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG },
134 134
135 [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY, 135 [NL80211_ATTR_HT_CAPABILITY] = { .len = NL80211_HT_CAPABILITY_LEN },
136 .len = NL80211_HT_CAPABILITY_LEN },
137 136
138 [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 }, 137 [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 },
139 [NL80211_ATTR_IE] = { .type = NLA_BINARY, 138 [NL80211_ATTR_IE] = { .type = NLA_BINARY,
@@ -1253,6 +1252,12 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
1253 goto bad_res; 1252 goto bad_res;
1254 } 1253 }
1255 1254
1255 if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
1256 netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
1257 result = -EINVAL;
1258 goto bad_res;
1259 }
1260
1256 nla_for_each_nested(nl_txq_params, 1261 nla_for_each_nested(nl_txq_params,
1257 info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS], 1262 info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
1258 rem_txq_params) { 1263 rem_txq_params) {
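
In the nl80211 policy hunks above, dropping NLA_BINARY for the MAC, previous-BSSID and HT-capability attributes matters because NLA_BINARY treats .len as an upper bound, while an entry with no type and only a .len is validated as a minimum length, which is what fixed-size attributes need before the kernel reads ETH_ALEN (or NL80211_HT_CAPABILITY_LEN) bytes from them. A hedged sketch of the two behaviours side by side; the ATTR_* names and the policy array are illustrative:

    #include <net/netlink.h>
    #include <linux/if_ether.h>

    enum { ATTR_UNSPEC, ATTR_MAC_LOOSE, ATTR_MAC_STRICT, __ATTR_MAX };
    #define ATTR_MAX (__ATTR_MAX - 1)

    static const struct nla_policy example_policy[ATTR_MAX + 1] = {
        /* NLA_BINARY: .len is only a maximum, shorter payloads still pass */
        [ATTR_MAC_LOOSE]  = { .type = NLA_BINARY, .len = ETH_ALEN },
        /* untyped with .len: payload must be at least ETH_ALEN bytes */
        [ATTR_MAC_STRICT] = { .len = ETH_ALEN },
    };
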
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 6acba9d18cc8..3302c56f60d1 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -57,8 +57,17 @@
57#define REG_DBG_PRINT(args...) 57#define REG_DBG_PRINT(args...)
58#endif 58#endif
59 59
60static struct regulatory_request core_request_world = {
61 .initiator = NL80211_REGDOM_SET_BY_CORE,
62 .alpha2[0] = '0',
63 .alpha2[1] = '0',
64 .intersect = false,
65 .processed = true,
66 .country_ie_env = ENVIRON_ANY,
67};
68
60/* Receipt of information from last regulatory request */ 69/* Receipt of information from last regulatory request */
61static struct regulatory_request *last_request; 70static struct regulatory_request *last_request = &core_request_world;
62 71
63/* To trigger userspace events */ 72/* To trigger userspace events */
64static struct platform_device *reg_pdev; 73static struct platform_device *reg_pdev;
@@ -150,7 +159,7 @@ static char user_alpha2[2];
150module_param(ieee80211_regdom, charp, 0444); 159module_param(ieee80211_regdom, charp, 0444);
151MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); 160MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
152 161
153static void reset_regdomains(void) 162static void reset_regdomains(bool full_reset)
154{ 163{
155 /* avoid freeing static information or freeing something twice */ 164 /* avoid freeing static information or freeing something twice */
156 if (cfg80211_regdomain == cfg80211_world_regdom) 165 if (cfg80211_regdomain == cfg80211_world_regdom)
@@ -165,6 +174,13 @@ static void reset_regdomains(void)
165 174
166 cfg80211_world_regdom = &world_regdom; 175 cfg80211_world_regdom = &world_regdom;
167 cfg80211_regdomain = NULL; 176 cfg80211_regdomain = NULL;
177
178 if (!full_reset)
179 return;
180
181 if (last_request != &core_request_world)
182 kfree(last_request);
183 last_request = &core_request_world;
168} 184}
169 185
170/* 186/*
@@ -175,7 +191,7 @@ static void update_world_regdomain(const struct ieee80211_regdomain *rd)
175{ 191{
176 BUG_ON(!last_request); 192 BUG_ON(!last_request);
177 193
178 reset_regdomains(); 194 reset_regdomains(false);
179 195
180 cfg80211_world_regdom = rd; 196 cfg80211_world_regdom = rd;
181 cfg80211_regdomain = rd; 197 cfg80211_regdomain = rd;
@@ -1407,7 +1423,8 @@ static int __regulatory_hint(struct wiphy *wiphy,
1407 } 1423 }
1408 1424
1409new_request: 1425new_request:
1410 kfree(last_request); 1426 if (last_request != &core_request_world)
1427 kfree(last_request);
1411 1428
1412 last_request = pending_request; 1429 last_request = pending_request;
1413 last_request->intersect = intersect; 1430 last_request->intersect = intersect;
@@ -1577,9 +1594,6 @@ static int regulatory_hint_core(const char *alpha2)
1577{ 1594{
1578 struct regulatory_request *request; 1595 struct regulatory_request *request;
1579 1596
1580 kfree(last_request);
1581 last_request = NULL;
1582
1583 request = kzalloc(sizeof(struct regulatory_request), 1597 request = kzalloc(sizeof(struct regulatory_request),
1584 GFP_KERNEL); 1598 GFP_KERNEL);
1585 if (!request) 1599 if (!request)
@@ -1777,7 +1791,7 @@ static void restore_regulatory_settings(bool reset_user)
1777 mutex_lock(&cfg80211_mutex); 1791 mutex_lock(&cfg80211_mutex);
1778 mutex_lock(&reg_mutex); 1792 mutex_lock(&reg_mutex);
1779 1793
1780 reset_regdomains(); 1794 reset_regdomains(true);
1781 restore_alpha2(alpha2, reset_user); 1795 restore_alpha2(alpha2, reset_user);
1782 1796
1783 /* 1797 /*
@@ -2037,12 +2051,18 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2037 } 2051 }
2038 2052
2039 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); 2053 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
2054 if (!request_wiphy &&
2055 (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
2056 last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
2057 schedule_delayed_work(&reg_timeout, 0);
2058 return -ENODEV;
2059 }
2040 2060
2041 if (!last_request->intersect) { 2061 if (!last_request->intersect) {
2042 int r; 2062 int r;
2043 2063
2044 if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) { 2064 if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
2045 reset_regdomains(); 2065 reset_regdomains(false);
2046 cfg80211_regdomain = rd; 2066 cfg80211_regdomain = rd;
2047 return 0; 2067 return 0;
2048 } 2068 }
@@ -2063,7 +2083,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2063 if (r) 2083 if (r)
2064 return r; 2084 return r;
2065 2085
2066 reset_regdomains(); 2086 reset_regdomains(false);
2067 cfg80211_regdomain = rd; 2087 cfg80211_regdomain = rd;
2068 return 0; 2088 return 0;
2069 } 2089 }
@@ -2088,7 +2108,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2088 2108
2089 rd = NULL; 2109 rd = NULL;
2090 2110
2091 reset_regdomains(); 2111 reset_regdomains(false);
2092 cfg80211_regdomain = intersected_rd; 2112 cfg80211_regdomain = intersected_rd;
2093 2113
2094 return 0; 2114 return 0;
@@ -2108,7 +2128,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2108 kfree(rd); 2128 kfree(rd);
2109 rd = NULL; 2129 rd = NULL;
2110 2130
2111 reset_regdomains(); 2131 reset_regdomains(false);
2112 cfg80211_regdomain = intersected_rd; 2132 cfg80211_regdomain = intersected_rd;
2113 2133
2114 return 0; 2134 return 0;
@@ -2261,9 +2281,9 @@ void /* __init_or_exit */ regulatory_exit(void)
2261 mutex_lock(&cfg80211_mutex); 2281 mutex_lock(&cfg80211_mutex);
2262 mutex_lock(&reg_mutex); 2282 mutex_lock(&reg_mutex);
2263 2283
2264 reset_regdomains(); 2284 reset_regdomains(true);
2265 2285
2266 kfree(last_request); 2286 dev_set_uevent_suppress(&reg_pdev->dev, true);
2267 2287
2268 platform_device_unregister(reg_pdev); 2288 platform_device_unregister(reg_pdev);
2269 2289
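
The reg.c hunks make last_request default to a static "world" request (core_request_world) instead of NULL, so readers can always dereference it, at the cost of every free site having to skip the static sentinel. The pattern in compressed form; default_req and set_last_request are illustrative names:

    static struct regulatory_request default_req = {
        .initiator = NL80211_REGDOM_SET_BY_CORE,  /* mirrors core_request_world */
    };
    static struct regulatory_request *last_request = &default_req;

    static void set_last_request(struct regulatory_request *new_req)
    {
        if (last_request != &default_req)
            kfree(last_request);          /* never kfree() the static default */
        last_request = new_req ? new_req : &default_req;
    }
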
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 0fb142410404..dc23b31594e0 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -259,17 +259,20 @@ static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
259{ 259{
260 const u8 *ie1 = cfg80211_find_ie(num, ies1, len1); 260 const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
261 const u8 *ie2 = cfg80211_find_ie(num, ies2, len2); 261 const u8 *ie2 = cfg80211_find_ie(num, ies2, len2);
262 int r;
263 262
263 /* equal if both missing */
264 if (!ie1 && !ie2) 264 if (!ie1 && !ie2)
265 return 0; 265 return 0;
266 if (!ie1 || !ie2) 266 /* sort missing IE before (left of) present IE */
267 if (!ie1)
267 return -1; 268 return -1;
269 if (!ie2)
270 return 1;
268 271
269 r = memcmp(ie1 + 2, ie2 + 2, min(ie1[1], ie2[1])); 272 /* sort by length first, then by contents */
270 if (r == 0 && ie1[1] != ie2[1]) 273 if (ie1[1] != ie2[1])
271 return ie2[1] - ie1[1]; 274 return ie2[1] - ie1[1];
272 return r; 275 return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
273} 276}
274 277
275static bool is_bss(struct cfg80211_bss *a, 278static bool is_bss(struct cfg80211_bss *a,
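
The rewritten cmp_ies() above defines a stable total order: a missing IE sorts before a present one, unequal lengths are decided by length, and only equal-length IEs are compared byte for byte over their full length (the old code compared just the shorter prefix). The same three-step comparator shape in plain C, with generic names and an illustrative sort direction:

    #include <stddef.h>
    #include <string.h>

    /* Missing sorts before present, then length decides, then content. */
    static int cmp_blob(const unsigned char *a, size_t alen,
                        const unsigned char *b, size_t blen)
    {
        if (!a || !b)
            return (!a && !b) ? 0 : (!a ? -1 : 1);
        if (alen != blen)
            return alen < blen ? -1 : 1;
        return memcmp(a, b, alen);    /* only equal lengths get here */
    }
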
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 552df27dcf53..2118d6446630 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2382,9 +2382,11 @@ static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2382 return dst_metric_advmss(dst->path); 2382 return dst_metric_advmss(dst->path);
2383} 2383}
2384 2384
2385static unsigned int xfrm_default_mtu(const struct dst_entry *dst) 2385static unsigned int xfrm_mtu(const struct dst_entry *dst)
2386{ 2386{
2387 return dst_mtu(dst->path); 2387 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2388
2389 return mtu ? : dst_mtu(dst->path);
2388} 2390}
2389 2391
2390static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr) 2392static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)
@@ -2411,8 +2413,8 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2411 dst_ops->check = xfrm_dst_check; 2413 dst_ops->check = xfrm_dst_check;
2412 if (likely(dst_ops->default_advmss == NULL)) 2414 if (likely(dst_ops->default_advmss == NULL))
2413 dst_ops->default_advmss = xfrm_default_advmss; 2415 dst_ops->default_advmss = xfrm_default_advmss;
2414 if (likely(dst_ops->default_mtu == NULL)) 2416 if (likely(dst_ops->mtu == NULL))
2415 dst_ops->default_mtu = xfrm_default_mtu; 2417 dst_ops->mtu = xfrm_mtu;
2416 if (likely(dst_ops->negative_advice == NULL)) 2418 if (likely(dst_ops->negative_advice == NULL))
2417 dst_ops->negative_advice = xfrm_negative_advice; 2419 dst_ops->negative_advice = xfrm_negative_advice;
2418 if (likely(dst_ops->link_failure == NULL)) 2420 if (likely(dst_ops->link_failure == NULL))
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 36cc0cc39e78..b566eba4a65c 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -57,23 +57,44 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen)
57static int d_namespace_path(struct path *path, char *buf, int buflen, 57static int d_namespace_path(struct path *path, char *buf, int buflen,
58 char **name, int flags) 58 char **name, int flags)
59{ 59{
60 struct path root, tmp;
61 char *res; 60 char *res;
62 int connected, error = 0; 61 int error = 0;
62 int connected = 1;
63
64 if (path->mnt->mnt_flags & MNT_INTERNAL) {
65 /* it's not mounted anywhere */
66 res = dentry_path(path->dentry, buf, buflen);
67 *name = res;
68 if (IS_ERR(res)) {
69 *name = buf;
70 return PTR_ERR(res);
71 }
72 if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
73 strncmp(*name, "/sys/", 5) == 0) {
74 /* TODO: convert over to using a per namespace
75 * control instead of hard coded /proc
76 */
77 return prepend(name, *name - buf, "/proc", 5);
78 }
79 return 0;
80 }
63 81
64 /* Get the root we want to resolve too, released below */ 82 /* resolve paths relative to chroot?*/
65 if (flags & PATH_CHROOT_REL) { 83 if (flags & PATH_CHROOT_REL) {
66 /* resolve paths relative to chroot */ 84 struct path root;
67 get_fs_root(current->fs, &root); 85 get_fs_root(current->fs, &root);
68 } else { 86 res = __d_path(path, &root, buf, buflen);
69 /* resolve paths relative to namespace */ 87 if (res && !IS_ERR(res)) {
70 root.mnt = current->nsproxy->mnt_ns->root; 88 /* everything's fine */
71 root.dentry = root.mnt->mnt_root; 89 *name = res;
72 path_get(&root); 90 path_put(&root);
91 goto ok;
92 }
93 path_put(&root);
94 connected = 0;
73 } 95 }
74 96
75 tmp = root; 97 res = d_absolute_path(path, buf, buflen);
76 res = __d_path(path, &tmp, buf, buflen);
77 98
78 *name = res; 99 *name = res;
79 /* handle error conditions - and still allow a partial path to 100 /* handle error conditions - and still allow a partial path to
@@ -84,7 +105,10 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
84 *name = buf; 105 *name = buf;
85 goto out; 106 goto out;
86 } 107 }
108 if (!our_mnt(path->mnt))
109 connected = 0;
87 110
111ok:
88 /* Handle two cases: 112 /* Handle two cases:
89 * 1. A deleted dentry && profile is not allowing mediation of deleted 113 * 1. A deleted dentry && profile is not allowing mediation of deleted
90 * 2. On some filesystems, newly allocated dentries appear to the 114 * 2. On some filesystems, newly allocated dentries appear to the
@@ -97,10 +121,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
97 goto out; 121 goto out;
98 } 122 }
99 123
100 /* Determine if the path is connected to the expected root */ 124 /* If the path is not connected to the expected root,
101 connected = tmp.dentry == root.dentry && tmp.mnt == root.mnt;
102
103 /* If the path is not connected,
104 * check if it is a sysctl and handle specially else remove any 125 * check if it is a sysctl and handle specially else remove any
105 * leading / that __d_path may have returned. 126 * leading / that __d_path may have returned.
106 * Unless 127 * Unless
@@ -112,17 +133,9 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
112 * namespace root. 133 * namespace root.
113 */ 134 */
114 if (!connected) { 135 if (!connected) {
115 /* is the disconnect path a sysctl? */ 136 if (!(flags & PATH_CONNECT_PATH) &&
116 if (tmp.dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
117 strncmp(*name, "/sys/", 5) == 0) {
118 /* TODO: convert over to using a per namespace
119 * control instead of hard coded /proc
120 */
121 error = prepend(name, *name - buf, "/proc", 5);
122 } else if (!(flags & PATH_CONNECT_PATH) &&
123 !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) && 137 !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
124 (tmp.mnt == current->nsproxy->mnt_ns->root && 138 our_mnt(path->mnt))) {
125 tmp.dentry == tmp.mnt->mnt_root))) {
126 /* disconnected path, don't return pathname starting 139 /* disconnected path, don't return pathname starting
127 * with '/' 140 * with '/'
128 */ 141 */
@@ -133,8 +146,6 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
133 } 146 }
134 147
135out: 148out:
136 path_put(&root);
137
138 return error; 149 return error;
139} 150}
140 151
diff --git a/security/keys/encrypted-keys/Makefile b/security/keys/encrypted-keys/Makefile
index 6bc7a86d1027..d6f8433250a5 100644
--- a/security/keys/encrypted-keys/Makefile
+++ b/security/keys/encrypted-keys/Makefile
@@ -2,5 +2,9 @@
2# Makefile for encrypted keys 2# Makefile for encrypted keys
3# 3#
4 4
5obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o ecryptfs_format.o 5obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys.o
6obj-$(CONFIG_TRUSTED_KEYS) += masterkey_trusted.o 6
7encrypted-keys-y := encrypted.o ecryptfs_format.o
8masterkey-$(CONFIG_TRUSTED_KEYS) := masterkey_trusted.o
9masterkey-$(CONFIG_TRUSTED_KEYS)-$(CONFIG_ENCRYPTED_KEYS) := masterkey_trusted.o
10encrypted-keys-y += $(masterkey-y) $(masterkey-m-m)
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index dcc843cb0f80..41144f71d615 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -444,7 +444,7 @@ static struct key *request_master_key(struct encrypted_key_payload *epayload,
444 goto out; 444 goto out;
445 445
446 if (IS_ERR(mkey)) { 446 if (IS_ERR(mkey)) {
447 int ret = PTR_ERR(epayload); 447 int ret = PTR_ERR(mkey);
448 448
449 if (ret == -ENOTSUPP) 449 if (ret == -ENOTSUPP)
450 pr_info("encrypted_key: key %s not supported", 450 pr_info("encrypted_key: key %s not supported",
diff --git a/security/keys/encrypted-keys/encrypted.h b/security/keys/encrypted-keys/encrypted.h
index b6ade8945250..8136a2d44c63 100644
--- a/security/keys/encrypted-keys/encrypted.h
+++ b/security/keys/encrypted-keys/encrypted.h
@@ -2,7 +2,8 @@
2#define __ENCRYPTED_KEY_H 2#define __ENCRYPTED_KEY_H
3 3
4#define ENCRYPTED_DEBUG 0 4#define ENCRYPTED_DEBUG 0
5#ifdef CONFIG_TRUSTED_KEYS 5#if defined(CONFIG_TRUSTED_KEYS) || \
6 (defined(CONFIG_TRUSTED_KEYS_MODULE) && defined(CONFIG_ENCRYPTED_KEYS_MODULE))
6extern struct key *request_trusted_key(const char *trusted_desc, 7extern struct key *request_trusted_key(const char *trusted_desc,
7 u8 **master_key, size_t *master_keylen); 8 u8 **master_key, size_t *master_keylen);
8#else 9#else
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 5b366d7af3c4..69ff52c08e97 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -102,7 +102,8 @@ int user_update(struct key *key, const void *data, size_t datalen)
102 key->expiry = 0; 102 key->expiry = 0;
103 } 103 }
104 104
105 kfree_rcu(zap, rcu); 105 if (zap)
106 kfree_rcu(zap, rcu);
106 107
107error: 108error:
108 return ret; 109 return ret;
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 6aceef518a41..5c32f36ff706 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -102,9 +102,6 @@ static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
102 102
103const char *smack_cipso_option = SMACK_CIPSO_OPTION; 103const char *smack_cipso_option = SMACK_CIPSO_OPTION;
104 104
105
106#define SEQ_READ_FINISHED ((loff_t)-1)
107
108/* 105/*
109 * Values for parsing cipso rules 106 * Values for parsing cipso rules
110 * SMK_DIGITLEN: Length of a digit field in a rule. 107 * SMK_DIGITLEN: Length of a digit field in a rule.
@@ -357,10 +354,12 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
357 354
358 rc = count; 355 rc = count;
359 /* 356 /*
357 * If this is "load" as opposed to "load-self" and a new rule
358 * it needs to get added for reporting.
360 * smk_set_access returns true if there was already a rule 359 * smk_set_access returns true if there was already a rule
361 * for the subject/object pair, and false if it was new. 360 * for the subject/object pair, and false if it was new.
362 */ 361 */
363 if (!smk_set_access(rule, rule_list, rule_lock)) { 362 if (load && !smk_set_access(rule, rule_list, rule_lock)) {
364 smlp = kzalloc(sizeof(*smlp), GFP_KERNEL); 363 smlp = kzalloc(sizeof(*smlp), GFP_KERNEL);
365 if (smlp != NULL) { 364 if (smlp != NULL) {
366 smlp->smk_rule = rule; 365 smlp->smk_rule = rule;
@@ -377,12 +376,12 @@ out:
377 return rc; 376 return rc;
378} 377}
379 378
380
381/* 379/*
382 * Seq_file read operations for /smack/load 380 * Core logic for smackfs seq list operations.
383 */ 381 */
384 382
385static void *load_seq_start(struct seq_file *s, loff_t *pos) 383static void *smk_seq_start(struct seq_file *s, loff_t *pos,
384 struct list_head *head)
386{ 385{
387 struct list_head *list; 386 struct list_head *list;
388 387
@@ -390,7 +389,7 @@ static void *load_seq_start(struct seq_file *s, loff_t *pos)
390 * This is 0 the first time through. 389 * This is 0 the first time through.
391 */ 390 */
392 if (s->index == 0) 391 if (s->index == 0)
393 s->private = &smack_rule_list; 392 s->private = head;
394 393
395 if (s->private == NULL) 394 if (s->private == NULL)
396 return NULL; 395 return NULL;
@@ -404,11 +403,12 @@ static void *load_seq_start(struct seq_file *s, loff_t *pos)
404 return list; 403 return list;
405} 404}
406 405
407static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos) 406static void *smk_seq_next(struct seq_file *s, void *v, loff_t *pos,
407 struct list_head *head)
408{ 408{
409 struct list_head *list = v; 409 struct list_head *list = v;
410 410
411 if (list_is_last(list, &smack_rule_list)) { 411 if (list_is_last(list, head)) {
412 s->private = NULL; 412 s->private = NULL;
413 return NULL; 413 return NULL;
414 } 414 }
@@ -416,6 +416,25 @@ static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
416 return list->next; 416 return list->next;
417} 417}
418 418
419static void smk_seq_stop(struct seq_file *s, void *v)
420{
421 /* No-op */
422}
423
424/*
425 * Seq_file read operations for /smack/load
426 */
427
428static void *load_seq_start(struct seq_file *s, loff_t *pos)
429{
430 return smk_seq_start(s, pos, &smack_rule_list);
431}
432
433static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
434{
435 return smk_seq_next(s, v, pos, &smack_rule_list);
436}
437
419static int load_seq_show(struct seq_file *s, void *v) 438static int load_seq_show(struct seq_file *s, void *v)
420{ 439{
421 struct list_head *list = v; 440 struct list_head *list = v;
@@ -446,16 +465,11 @@ static int load_seq_show(struct seq_file *s, void *v)
446 return 0; 465 return 0;
447} 466}
448 467
449static void load_seq_stop(struct seq_file *s, void *v)
450{
451 /* No-op */
452}
453
454static const struct seq_operations load_seq_ops = { 468static const struct seq_operations load_seq_ops = {
455 .start = load_seq_start, 469 .start = load_seq_start,
456 .next = load_seq_next, 470 .next = load_seq_next,
457 .show = load_seq_show, 471 .show = load_seq_show,
458 .stop = load_seq_stop, 472 .stop = smk_seq_stop,
459}; 473};
460 474
461/** 475/**
@@ -574,28 +588,12 @@ static void smk_unlbl_ambient(char *oldambient)
574 588
575static void *cipso_seq_start(struct seq_file *s, loff_t *pos) 589static void *cipso_seq_start(struct seq_file *s, loff_t *pos)
576{ 590{
577 if (*pos == SEQ_READ_FINISHED) 591 return smk_seq_start(s, pos, &smack_known_list);
578 return NULL;
579 if (list_empty(&smack_known_list))
580 return NULL;
581
582 return smack_known_list.next;
583} 592}
584 593
585static void *cipso_seq_next(struct seq_file *s, void *v, loff_t *pos) 594static void *cipso_seq_next(struct seq_file *s, void *v, loff_t *pos)
586{ 595{
587 struct list_head *list = v; 596 return smk_seq_next(s, v, pos, &smack_known_list);
588
589 /*
590 * labels with no associated cipso value wont be printed
591 * in cipso_seq_show
592 */
593 if (list_is_last(list, &smack_known_list)) {
594 *pos = SEQ_READ_FINISHED;
595 return NULL;
596 }
597
598 return list->next;
599} 597}
600 598
601/* 599/*
@@ -634,16 +632,11 @@ static int cipso_seq_show(struct seq_file *s, void *v)
634 return 0; 632 return 0;
635} 633}
636 634
637static void cipso_seq_stop(struct seq_file *s, void *v)
638{
639 /* No-op */
640}
641
642static const struct seq_operations cipso_seq_ops = { 635static const struct seq_operations cipso_seq_ops = {
643 .start = cipso_seq_start, 636 .start = cipso_seq_start,
644 .stop = cipso_seq_stop,
645 .next = cipso_seq_next, 637 .next = cipso_seq_next,
646 .show = cipso_seq_show, 638 .show = cipso_seq_show,
639 .stop = smk_seq_stop,
647}; 640};
648 641
649/** 642/**
@@ -788,23 +781,12 @@ static const struct file_operations smk_cipso_ops = {
788 781
789static void *netlbladdr_seq_start(struct seq_file *s, loff_t *pos) 782static void *netlbladdr_seq_start(struct seq_file *s, loff_t *pos)
790{ 783{
791 if (*pos == SEQ_READ_FINISHED) 784 return smk_seq_start(s, pos, &smk_netlbladdr_list);
792 return NULL;
793 if (list_empty(&smk_netlbladdr_list))
794 return NULL;
795 return smk_netlbladdr_list.next;
796} 785}
797 786
798static void *netlbladdr_seq_next(struct seq_file *s, void *v, loff_t *pos) 787static void *netlbladdr_seq_next(struct seq_file *s, void *v, loff_t *pos)
799{ 788{
800 struct list_head *list = v; 789 return smk_seq_next(s, v, pos, &smk_netlbladdr_list);
801
802 if (list_is_last(list, &smk_netlbladdr_list)) {
803 *pos = SEQ_READ_FINISHED;
804 return NULL;
805 }
806
807 return list->next;
808} 790}
809#define BEBITS (sizeof(__be32) * 8) 791#define BEBITS (sizeof(__be32) * 8)
810 792
@@ -828,16 +810,11 @@ static int netlbladdr_seq_show(struct seq_file *s, void *v)
828 return 0; 810 return 0;
829} 811}
830 812
831static void netlbladdr_seq_stop(struct seq_file *s, void *v)
832{
833 /* No-op */
834}
835
836static const struct seq_operations netlbladdr_seq_ops = { 813static const struct seq_operations netlbladdr_seq_ops = {
837 .start = netlbladdr_seq_start, 814 .start = netlbladdr_seq_start,
838 .stop = netlbladdr_seq_stop,
839 .next = netlbladdr_seq_next, 815 .next = netlbladdr_seq_next,
840 .show = netlbladdr_seq_show, 816 .show = netlbladdr_seq_show,
817 .stop = smk_seq_stop,
841}; 818};
842 819
843/** 820/**
@@ -1405,23 +1382,14 @@ static void *load_self_seq_start(struct seq_file *s, loff_t *pos)
1405{ 1382{
1406 struct task_smack *tsp = current_security(); 1383 struct task_smack *tsp = current_security();
1407 1384
1408 if (*pos == SEQ_READ_FINISHED) 1385 return smk_seq_start(s, pos, &tsp->smk_rules);
1409 return NULL;
1410 if (list_empty(&tsp->smk_rules))
1411 return NULL;
1412 return tsp->smk_rules.next;
1413} 1386}
1414 1387
1415static void *load_self_seq_next(struct seq_file *s, void *v, loff_t *pos) 1388static void *load_self_seq_next(struct seq_file *s, void *v, loff_t *pos)
1416{ 1389{
1417 struct task_smack *tsp = current_security(); 1390 struct task_smack *tsp = current_security();
1418 struct list_head *list = v;
1419 1391
1420 if (list_is_last(list, &tsp->smk_rules)) { 1392 return smk_seq_next(s, v, pos, &tsp->smk_rules);
1421 *pos = SEQ_READ_FINISHED;
1422 return NULL;
1423 }
1424 return list->next;
1425} 1393}
1426 1394
1427static int load_self_seq_show(struct seq_file *s, void *v) 1395static int load_self_seq_show(struct seq_file *s, void *v)
@@ -1453,16 +1421,11 @@ static int load_self_seq_show(struct seq_file *s, void *v)
1453 return 0; 1421 return 0;
1454} 1422}
1455 1423
1456static void load_self_seq_stop(struct seq_file *s, void *v)
1457{
1458 /* No-op */
1459}
1460
1461static const struct seq_operations load_self_seq_ops = { 1424static const struct seq_operations load_self_seq_ops = {
1462 .start = load_self_seq_start, 1425 .start = load_self_seq_start,
1463 .next = load_self_seq_next, 1426 .next = load_self_seq_next,
1464 .show = load_self_seq_show, 1427 .show = load_self_seq_show,
1465 .stop = load_self_seq_stop, 1428 .stop = smk_seq_stop,
1466}; 1429};
1467 1430
1468 1431
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 738bbdf8d4c7..d9f3ced8756e 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -101,9 +101,8 @@ static char *tomoyo_get_absolute_path(struct path *path, char * const buffer,
101{ 101{
102 char *pos = ERR_PTR(-ENOMEM); 102 char *pos = ERR_PTR(-ENOMEM);
103 if (buflen >= 256) { 103 if (buflen >= 256) {
104 struct path ns_root = { };
105 /* go to whatever namespace root we are under */ 104 /* go to whatever namespace root we are under */
106 pos = __d_path(path, &ns_root, buffer, buflen - 1); 105 pos = d_absolute_path(path, buffer, buflen - 1);
107 if (!IS_ERR(pos) && *pos == '/' && pos[1]) { 106 if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
108 struct inode *inode = path->dentry->d_inode; 107 struct inode *inode = path->dentry->d_inode;
109 if (inode && S_ISDIR(inode->i_mode)) { 108 if (inode && S_ISDIR(inode->i_mode)) {
@@ -294,8 +293,16 @@ char *tomoyo_realpath_from_path(struct path *path)
294 pos = tomoyo_get_local_path(path->dentry, buf, 293 pos = tomoyo_get_local_path(path->dentry, buf,
295 buf_len - 1); 294 buf_len - 1);
296 /* Get absolute name for the rest. */ 295 /* Get absolute name for the rest. */
297 else 296 else {
298 pos = tomoyo_get_absolute_path(path, buf, buf_len - 1); 297 pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
298 /*
299 * Fall back to local name if absolute name is not
300 * available.
301 */
302 if (pos == ERR_PTR(-EINVAL))
303 pos = tomoyo_get_local_path(path->dentry, buf,
304 buf_len - 1);
305 }
299encode: 306encode:
300 if (IS_ERR(pos)) 307 if (IS_ERR(pos))
301 continue; 308 continue;
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 5dbab38d04af..130cfe677d60 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -52,6 +52,7 @@ struct link_slave {
52 struct link_ctl_info info; 52 struct link_ctl_info info;
53 int vals[2]; /* current values */ 53 int vals[2]; /* current values */
54 unsigned int flags; 54 unsigned int flags;
55 struct snd_kcontrol *kctl; /* original kcontrol pointer */
55 struct snd_kcontrol slave; /* the copy of original control entry */ 56 struct snd_kcontrol slave; /* the copy of original control entry */
56}; 57};
57 58
@@ -252,6 +253,7 @@ int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave,
252 slave->count * sizeof(*slave->vd), GFP_KERNEL); 253 slave->count * sizeof(*slave->vd), GFP_KERNEL);
253 if (!srec) 254 if (!srec)
254 return -ENOMEM; 255 return -ENOMEM;
256 srec->kctl = slave;
255 srec->slave = *slave; 257 srec->slave = *slave;
256 memcpy(srec->slave.vd, slave->vd, slave->count * sizeof(*slave->vd)); 258 memcpy(srec->slave.vd, slave->vd, slave->count * sizeof(*slave->vd));
257 srec->master = master_link; 259 srec->master = master_link;
@@ -333,10 +335,18 @@ static int master_put(struct snd_kcontrol *kcontrol,
333static void master_free(struct snd_kcontrol *kcontrol) 335static void master_free(struct snd_kcontrol *kcontrol)
334{ 336{
335 struct link_master *master = snd_kcontrol_chip(kcontrol); 337 struct link_master *master = snd_kcontrol_chip(kcontrol);
336 struct link_slave *slave; 338 struct link_slave *slave, *n;
337 339
338 list_for_each_entry(slave, &master->slaves, list) 340 /* free all slave links and retore the original slave kctls */
339 slave->master = NULL; 341 list_for_each_entry_safe(slave, n, &master->slaves, list) {
342 struct snd_kcontrol *sctl = slave->kctl;
343 struct list_head olist = sctl->list;
344 memcpy(sctl, &slave->slave, sizeof(*sctl));
345 memcpy(sctl->vd, slave->slave.vd,
346 sctl->count * sizeof(*sctl->vd));
347 sctl->list = olist; /* keep the current linked-list */
348 kfree(slave);
349 }
340 kfree(master); 350 kfree(master);
341} 351}
342 352
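
master_free() above switches to list_for_each_entry_safe() because every slave record is kfree()'d from inside the loop; the _safe variant samples the next entry before the body runs. A minimal sketch of that idiom with an illustrative element type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {                 /* illustrative element type */
        struct list_head list;
    };

    static void free_all(struct list_head *head)
    {
        struct item *it, *next;

        /* _safe caches the next entry up front, so kfree()ing the current
         * one cannot break the walk */
        list_for_each_entry_safe(it, next, head, list) {
            list_del(&it->list);
            kfree(it);
        }
    }
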
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index e083122ca55a..dbf94b189e75 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -148,7 +148,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
148 struct cs5535audio_dma_desc *desc = 148 struct cs5535audio_dma_desc *desc =
149 &((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i]; 149 &((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i];
150 desc->addr = cpu_to_le32(addr); 150 desc->addr = cpu_to_le32(addr);
151 desc->size = cpu_to_le32(period_bytes); 151 desc->size = cpu_to_le16(period_bytes);
152 desc->ctlreserved = cpu_to_le16(PRD_EOP); 152 desc->ctlreserved = cpu_to_le16(PRD_EOP);
153 desc_addr += sizeof(struct cs5535audio_dma_desc); 153 desc_addr += sizeof(struct cs5535audio_dma_desc);
154 addr += period_bytes; 154 addr += period_bytes;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 916a1863af73..4562e9de6a1a 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2331,6 +2331,39 @@ int snd_hda_codec_reset(struct hda_codec *codec)
2331 return 0; 2331 return 0;
2332} 2332}
2333 2333
2334typedef int (*map_slave_func_t)(void *, struct snd_kcontrol *);
2335
2336/* apply the function to all matching slave ctls in the mixer list */
2337static int map_slaves(struct hda_codec *codec, const char * const *slaves,
2338 map_slave_func_t func, void *data)
2339{
2340 struct hda_nid_item *items;
2341 const char * const *s;
2342 int i, err;
2343
2344 items = codec->mixers.list;
2345 for (i = 0; i < codec->mixers.used; i++) {
2346 struct snd_kcontrol *sctl = items[i].kctl;
2347 if (!sctl || !sctl->id.name ||
2348 sctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER)
2349 continue;
2350 for (s = slaves; *s; s++) {
2351 if (!strcmp(sctl->id.name, *s)) {
2352 err = func(data, sctl);
2353 if (err)
2354 return err;
2355 break;
2356 }
2357 }
2358 }
2359 return 0;
2360}
2361
2362static int check_slave_present(void *data, struct snd_kcontrol *sctl)
2363{
2364 return 1;
2365}
2366
2334/** 2367/**
2335 * snd_hda_add_vmaster - create a virtual master control and add slaves 2368 * snd_hda_add_vmaster - create a virtual master control and add slaves
2336 * @codec: HD-audio codec 2369 * @codec: HD-audio codec
@@ -2351,12 +2384,10 @@ int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
2351 unsigned int *tlv, const char * const *slaves) 2384 unsigned int *tlv, const char * const *slaves)
2352{ 2385{
2353 struct snd_kcontrol *kctl; 2386 struct snd_kcontrol *kctl;
2354 const char * const *s;
2355 int err; 2387 int err;
2356 2388
2357 for (s = slaves; *s && !snd_hda_find_mixer_ctl(codec, *s); s++) 2389 err = map_slaves(codec, slaves, check_slave_present, NULL);
2358 ; 2390 if (err != 1) {
2359 if (!*s) {
2360 snd_printdd("No slave found for %s\n", name); 2391 snd_printdd("No slave found for %s\n", name);
2361 return 0; 2392 return 0;
2362 } 2393 }
@@ -2367,23 +2398,10 @@ int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
2367 if (err < 0) 2398 if (err < 0)
2368 return err; 2399 return err;
2369 2400
2370 for (s = slaves; *s; s++) { 2401 err = map_slaves(codec, slaves, (map_slave_func_t)snd_ctl_add_slave,
2371 struct snd_kcontrol *sctl; 2402 kctl);
2372 int i = 0; 2403 if (err < 0)
2373 for (;;) { 2404 return err;
2374 sctl = _snd_hda_find_mixer_ctl(codec, *s, i);
2375 if (!sctl) {
2376 if (!i)
2377 snd_printdd("Cannot find slave %s, "
2378 "skipped\n", *s);
2379 break;
2380 }
2381 err = snd_ctl_add_slave(kctl, sctl);
2382 if (err < 0)
2383 return err;
2384 i++;
2385 }
2386 }
2387 return 0; 2405 return 0;
2388} 2406}
2389EXPORT_SYMBOL_HDA(snd_hda_add_vmaster); 2407EXPORT_SYMBOL_HDA(snd_hda_add_vmaster);
@@ -4028,9 +4046,9 @@ int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
4028 4046
4029 /* Search for codec ID */ 4047 /* Search for codec ID */
4030 for (q = tbl; q->subvendor; q++) { 4048 for (q = tbl; q->subvendor; q++) {
4031 unsigned long vendorid = (q->subdevice) | (q->subvendor << 16); 4049 unsigned int mask = 0xffff0000 | q->subdevice_mask;
4032 4050 unsigned int id = (q->subdevice | (q->subvendor << 16)) & mask;
4033 if (vendorid == codec->subsystem_id) 4051 if ((codec->subsystem_id & mask) == id)
4034 break; 4052 break;
4035 } 4053 }
4036 4054
@@ -4752,6 +4770,7 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
4752 memset(sequences_hp, 0, sizeof(sequences_hp)); 4770 memset(sequences_hp, 0, sizeof(sequences_hp));
4753 assoc_line_out = 0; 4771 assoc_line_out = 0;
4754 4772
4773 codec->ignore_misc_bit = true;
4755 end_nid = codec->start_nid + codec->num_nodes; 4774 end_nid = codec->start_nid + codec->num_nodes;
4756 for (nid = codec->start_nid; nid < end_nid; nid++) { 4775 for (nid = codec->start_nid; nid < end_nid; nid++) {
4757 unsigned int wid_caps = get_wcaps(codec, nid); 4776 unsigned int wid_caps = get_wcaps(codec, nid);
@@ -4767,6 +4786,9 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
4767 continue; 4786 continue;
4768 4787
4769 def_conf = snd_hda_codec_get_pincfg(codec, nid); 4788 def_conf = snd_hda_codec_get_pincfg(codec, nid);
4789 if (!(get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid)) &
4790 AC_DEFCFG_MISC_NO_PRESENCE))
4791 codec->ignore_misc_bit = false;
4770 conn = get_defcfg_connect(def_conf); 4792 conn = get_defcfg_connect(def_conf);
4771 if (conn == AC_JACK_PORT_NONE) 4793 if (conn == AC_JACK_PORT_NONE)
4772 continue; 4794 continue;
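
The new map_slaves() helper in hda_codec.c reduces "walk the mixer controls, match by name, call a function" to one reusable loop, so the slave-presence check and the snd_ctl_add_slave() attachment share it. The same shape in miniature, with generic names and plain C strings standing in for kcontrols; both lists are assumed NULL-terminated:

    #include <string.h>

    typedef int (*visit_fn)(void *ctx, const char *name);

    /* Call fn for every name that also appears in the wanted list; stop on error. */
    static int for_each_wanted(const char *const *names,
                               const char *const *wanted,
                               visit_fn fn, void *ctx)
    {
        const char *const *n;
        const char *const *w;
        int err;

        for (n = names; *n; n++)
            for (w = wanted; *w; w++)
                if (!strcmp(*n, *w)) {
                    err = fn(ctx, *n);
                    if (err)
                        return err;
                    break;
                }
        return 0;
    }
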
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 755f2b0f9d8e..564471169cae 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -854,6 +854,7 @@ struct hda_codec {
854 unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */ 854 unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */
855 unsigned int pins_shutup:1; /* pins are shut up */ 855 unsigned int pins_shutup:1; /* pins are shut up */
856 unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */ 856 unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
857 unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */
857#ifdef CONFIG_SND_HDA_POWER_SAVE 858#ifdef CONFIG_SND_HDA_POWER_SAVE
858 unsigned int power_on :1; /* current (global) power-state */ 859 unsigned int power_on :1; /* current (global) power-state */
859 unsigned int power_transition :1; /* power-state in transition */ 860 unsigned int power_transition :1; /* power-state in transition */
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 1c8ddf547a2d..c1da422e085a 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -297,10 +297,18 @@ static int hdmi_update_eld(struct hdmi_eld *e,
297 buf + ELD_FIXED_BYTES + mnl + 3 * i); 297 buf + ELD_FIXED_BYTES + mnl + 3 * i);
298 } 298 }
299 299
300 /*
301 * HDMI sink's ELD info cannot always be retrieved for now, e.g.
302 * in console or for audio devices. Assume the highest speakers
303 * configuration, to _not_ prohibit multi-channel audio playback.
304 */
305 if (!e->spk_alloc)
306 e->spk_alloc = 0xffff;
307
308 e->eld_valid = true;
300 return 0; 309 return 0;
301 310
302out_fail: 311out_fail:
303 e->eld_ver = 0;
304 return -EINVAL; 312 return -EINVAL;
305} 313}
306 314
@@ -323,9 +331,6 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
323 * ELD is valid, actual eld_size is assigned in hdmi_update_eld() 331 * ELD is valid, actual eld_size is assigned in hdmi_update_eld()
324 */ 332 */
325 333
326 if (!eld->eld_valid)
327 return -ENOENT;
328
329 size = snd_hdmi_get_eld_size(codec, nid); 334 size = snd_hdmi_get_eld_size(codec, nid);
330 if (size == 0) { 335 if (size == 0) {
331 /* wfg: workaround for ASUS P5E-VM HDMI board */ 336 /* wfg: workaround for ASUS P5E-VM HDMI board */
@@ -342,18 +347,28 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
342 347
343 for (i = 0; i < size; i++) { 348 for (i = 0; i < size; i++) {
344 unsigned int val = hdmi_get_eld_data(codec, nid, i); 349 unsigned int val = hdmi_get_eld_data(codec, nid, i);
350 /*
351 * Graphics driver might be writing to ELD buffer right now.
352 * Just abort. The caller will repoll after a while.
353 */
345 if (!(val & AC_ELDD_ELD_VALID)) { 354 if (!(val & AC_ELDD_ELD_VALID)) {
346 if (!i) {
347 snd_printd(KERN_INFO
348 "HDMI: invalid ELD data\n");
349 ret = -EINVAL;
350 goto error;
351 }
352 snd_printd(KERN_INFO 355 snd_printd(KERN_INFO
353 "HDMI: invalid ELD data byte %d\n", i); 356 "HDMI: invalid ELD data byte %d\n", i);
354 val = 0; 357 ret = -EINVAL;
355 } else 358 goto error;
356 val &= AC_ELDD_ELD_DATA; 359 }
360 val &= AC_ELDD_ELD_DATA;
361 /*
362 * The first byte cannot be zero. This can happen on some DVI
363 * connections. Some Intel chips may also need some 250ms delay
364 * to return non-zero ELD data, even when the graphics driver
365 * correctly writes ELD content before setting ELD_valid bit.
366 */
367 if (!val && !i) {
368 snd_printdd(KERN_INFO "HDMI: 0 ELD data\n");
369 ret = -EINVAL;
370 goto error;
371 }
357 buf[i] = val; 372 buf[i] = val;
358 } 373 }
359 374
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 096507d2ca9a..c2f79e63124d 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2507,8 +2507,8 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
2507 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), 2507 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
2508 SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), 2508 SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
2509 SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), 2509 SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
2510 SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
2510 SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), 2511 SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
2511 SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
2512 SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB), 2512 SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
2513 SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), 2513 SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
2514 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), 2514 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
@@ -2971,7 +2971,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
2971 /* SCH */ 2971 /* SCH */
2972 { PCI_DEVICE(0x8086, 0x811b), 2972 { PCI_DEVICE(0x8086, 0x811b),
2973 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | 2973 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
2974 AZX_DCAPS_BUFSIZE}, 2974 AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */
2975 /* ICH */
2975 { PCI_DEVICE(0x8086, 0x2668), 2976 { PCI_DEVICE(0x8086, 0x2668),
2976 .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC | 2977 .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
2977 AZX_DCAPS_BUFSIZE }, /* ICH6 */ 2978 AZX_DCAPS_BUFSIZE }, /* ICH6 */
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index dcbea0da0fa2..618ddad17236 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -510,13 +510,15 @@ int snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid);
510 510
511static inline bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid) 511static inline bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid)
512{ 512{
513 return (snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_PRES_DETECT) && 513 if (!(snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_PRES_DETECT))
514 /* disable MISC_NO_PRESENCE check because it may break too 514 return false;
515 * many devices 515 if (!codec->ignore_misc_bit &&
516 */ 516 (get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid)) &
517 /*(get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid) & 517 AC_DEFCFG_MISC_NO_PRESENCE))
518 AC_DEFCFG_MISC_NO_PRESENCE)) &&*/ 518 return false;
519 (get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP); 519 if (!(get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP))
520 return false;
521 return true;
520} 522}
521 523
522/* flags for hda_nid_item */ 524/* flags for hda_nid_item */
@@ -651,6 +653,9 @@ struct hdmi_eld {
651 int spk_alloc; 653 int spk_alloc;
652 int sad_count; 654 int sad_count;
653 struct cea_sad sad[ELD_MAX_SAD]; 655 struct cea_sad sad[ELD_MAX_SAD];
656 /*
657 * all fields above eld_buffer will be cleared before updating ELD
658 */
654 char eld_buffer[ELD_MAX_SIZE]; 659 char eld_buffer[ELD_MAX_SIZE];
655#ifdef CONFIG_PROC_FS 660#ifdef CONFIG_PROC_FS
656 struct snd_info_entry *proc_entry; 661 struct snd_info_entry *proc_entry;
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 2a2d8645ba09..70a7abda7e22 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -58,6 +58,8 @@ struct cs_spec {
58 unsigned int gpio_mask; 58 unsigned int gpio_mask;
59 unsigned int gpio_dir; 59 unsigned int gpio_dir;
60 unsigned int gpio_data; 60 unsigned int gpio_data;
61 unsigned int gpio_eapd_hp; /* EAPD GPIO bit for headphones */
62 unsigned int gpio_eapd_speaker; /* EAPD GPIO bit for speakers */
61 63
62 struct hda_pcm pcm_rec[2]; /* PCM information */ 64 struct hda_pcm pcm_rec[2]; /* PCM information */
63 65
@@ -76,6 +78,7 @@ enum {
76 CS420X_MBP53, 78 CS420X_MBP53,
77 CS420X_MBP55, 79 CS420X_MBP55,
78 CS420X_IMAC27, 80 CS420X_IMAC27,
81 CS420X_APPLE,
79 CS420X_AUTO, 82 CS420X_AUTO,
80 CS420X_MODELS 83 CS420X_MODELS
81}; 84};
@@ -237,6 +240,15 @@ static int cs_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
237 return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout); 240 return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
238} 241}
239 242
243static void cs_update_input_select(struct hda_codec *codec)
244{
245 struct cs_spec *spec = codec->spec;
246 if (spec->cur_adc)
247 snd_hda_codec_write(codec, spec->cur_adc, 0,
248 AC_VERB_SET_CONNECT_SEL,
249 spec->adc_idx[spec->cur_input]);
250}
251
240/* 252/*
241 * Analog capture 253 * Analog capture
242 */ 254 */
@@ -250,6 +262,7 @@ static int cs_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
250 spec->cur_adc = spec->adc_nid[spec->cur_input]; 262 spec->cur_adc = spec->adc_nid[spec->cur_input];
251 spec->cur_adc_stream_tag = stream_tag; 263 spec->cur_adc_stream_tag = stream_tag;
252 spec->cur_adc_format = format; 264 spec->cur_adc_format = format;
265 cs_update_input_select(codec);
253 snd_hda_codec_setup_stream(codec, spec->cur_adc, stream_tag, 0, format); 266 snd_hda_codec_setup_stream(codec, spec->cur_adc, stream_tag, 0, format);
254 return 0; 267 return 0;
255} 268}
@@ -689,10 +702,8 @@ static int change_cur_input(struct hda_codec *codec, unsigned int idx,
689 spec->cur_adc_stream_tag, 0, 702 spec->cur_adc_stream_tag, 0,
690 spec->cur_adc_format); 703 spec->cur_adc_format);
691 } 704 }
692 snd_hda_codec_write(codec, spec->cur_adc, 0,
693 AC_VERB_SET_CONNECT_SEL,
694 spec->adc_idx[idx]);
695 spec->cur_input = idx; 705 spec->cur_input = idx;
706 cs_update_input_select(codec);
696 return 1; 707 return 1;
697} 708}
698 709
@@ -920,10 +931,9 @@ static void cs_automute(struct hda_codec *codec)
920 spdif_present ? 0 : PIN_OUT); 931 spdif_present ? 0 : PIN_OUT);
921 } 932 }
922 } 933 }
923 if (spec->board_config == CS420X_MBP53 || 934 if (spec->gpio_eapd_hp) {
924 spec->board_config == CS420X_MBP55 || 935 unsigned int gpio = hp_present ?
925 spec->board_config == CS420X_IMAC27) { 936 spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
926 unsigned int gpio = hp_present ? 0x02 : 0x08;
927 snd_hda_codec_write(codec, 0x01, 0, 937 snd_hda_codec_write(codec, 0x01, 0,
928 AC_VERB_SET_GPIO_DATA, gpio); 938 AC_VERB_SET_GPIO_DATA, gpio);
929 } 939 }
@@ -973,10 +983,7 @@ static void cs_automic(struct hda_codec *codec)
973 } else { 983 } else {
974 spec->cur_input = spec->last_input; 984 spec->cur_input = spec->last_input;
975 } 985 }
976 986 cs_update_input_select(codec);
977 snd_hda_codec_write_cache(codec, spec->cur_adc, 0,
978 AC_VERB_SET_CONNECT_SEL,
979 spec->adc_idx[spec->cur_input]);
980 } else { 987 } else {
981 if (present) 988 if (present)
982 change_cur_input(codec, spec->automic_idx, 0); 989 change_cur_input(codec, spec->automic_idx, 0);
@@ -1073,9 +1080,7 @@ static void init_input(struct hda_codec *codec)
1073 cs_automic(codec); 1080 cs_automic(codec);
1074 else { 1081 else {
1075 spec->cur_adc = spec->adc_nid[spec->cur_input]; 1082 spec->cur_adc = spec->adc_nid[spec->cur_input];
1076 snd_hda_codec_write(codec, spec->cur_adc, 0, 1083 cs_update_input_select(codec);
1077 AC_VERB_SET_CONNECT_SEL,
1078 spec->adc_idx[spec->cur_input]);
1079 } 1084 }
1080 } else { 1085 } else {
1081 change_cur_input(codec, spec->cur_input, 1); 1086 change_cur_input(codec, spec->cur_input, 1);
@@ -1273,6 +1278,7 @@ static const char * const cs420x_models[CS420X_MODELS] = {
1273 [CS420X_MBP53] = "mbp53", 1278 [CS420X_MBP53] = "mbp53",
1274 [CS420X_MBP55] = "mbp55", 1279 [CS420X_MBP55] = "mbp55",
1275 [CS420X_IMAC27] = "imac27", 1280 [CS420X_IMAC27] = "imac27",
1281 [CS420X_APPLE] = "apple",
1276 [CS420X_AUTO] = "auto", 1282 [CS420X_AUTO] = "auto",
1277}; 1283};
1278 1284
@@ -1282,7 +1288,13 @@ static const struct snd_pci_quirk cs420x_cfg_tbl[] = {
1282 SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55), 1288 SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
1283 SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55), 1289 SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
1284 SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55), 1290 SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
1285 SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27), 1291 /* this conflicts with too many other models */
1292 /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
1293 {} /* terminator */
1294};
1295
1296static const struct snd_pci_quirk cs420x_codec_cfg_tbl[] = {
1297 SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
1286 {} /* terminator */ 1298 {} /* terminator */
1287}; 1299};
1288 1300
@@ -1364,6 +1376,10 @@ static int patch_cs420x(struct hda_codec *codec)
1364 spec->board_config = 1376 spec->board_config =
1365 snd_hda_check_board_config(codec, CS420X_MODELS, 1377 snd_hda_check_board_config(codec, CS420X_MODELS,
1366 cs420x_models, cs420x_cfg_tbl); 1378 cs420x_models, cs420x_cfg_tbl);
1379 if (spec->board_config < 0)
1380 spec->board_config =
1381 snd_hda_check_board_codec_sid_config(codec,
1382 CS420X_MODELS, NULL, cs420x_codec_cfg_tbl);
1367 if (spec->board_config >= 0) 1383 if (spec->board_config >= 0)
1368 fix_pincfg(codec, spec->board_config, cs_pincfgs); 1384 fix_pincfg(codec, spec->board_config, cs_pincfgs);
1369 1385
@@ -1371,10 +1387,11 @@ static int patch_cs420x(struct hda_codec *codec)
1371 case CS420X_IMAC27: 1387 case CS420X_IMAC27:
1372 case CS420X_MBP53: 1388 case CS420X_MBP53:
1373 case CS420X_MBP55: 1389 case CS420X_MBP55:
1374 /* GPIO1 = headphones */ 1390 case CS420X_APPLE:
1375 /* GPIO3 = speakers */ 1391 spec->gpio_eapd_hp = 2; /* GPIO1 = headphones */
1376 spec->gpio_mask = 0x0a; 1392 spec->gpio_eapd_speaker = 8; /* GPIO3 = speakers */
1377 spec->gpio_dir = 0x0a; 1393 spec->gpio_mask = spec->gpio_dir =
1394 spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
1378 break; 1395 break;
1379 } 1396 }
1380 1397
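
In patch_cirrus.c the three duplicated AC_VERB_SET_CONNECT_SEL writes collapse into cs_update_input_select(), and the hard-coded 0x02/0x08 GPIO values plus the board_config comparisons become two named fields filled in at probe time. A hedged sketch of that second idea, with a simplified struct standing in for cs_spec:

    #include <stdbool.h>

    struct cs_state {
        unsigned int gpio_eapd_hp;       /* EAPD GPIO bit powering the headphone amp */
        unsigned int gpio_eapd_speaker;  /* EAPD GPIO bit powering the speaker amp */
    };

    /* returns the GPIO data value to write, or 0 if this board has no EAPD GPIOs */
    static unsigned int eapd_gpio_for(const struct cs_state *s, bool hp_present)
    {
        if (!s->gpio_eapd_hp)
            return 0;
        return hp_present ? s->gpio_eapd_hp : s->gpio_eapd_speaker;
    }

Keying the automute path off these fields rather than off a list of model IDs is what lets the new catch-all CS420X_APPLE entry share the same code path as the explicit MacBook/iMac quirks.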
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 5e706e4d1737..0de21193a2b0 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3062,7 +3062,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3062 SND_PCI_QUIRK(0x1043, 0x1993, "Asus U50F", CXT5066_ASUS), 3062 SND_PCI_QUIRK(0x1043, 0x1993, "Asus U50F", CXT5066_ASUS),
3063 SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD), 3063 SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
3064 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), 3064 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
3065 SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
3066 SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board", 3065 SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board",
3067 CXT5066_LAPTOP), 3066 CXT5066_LAPTOP),
3068 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5), 3067 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 81b7b791b3c3..c505fd5d338c 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -65,7 +65,11 @@ struct hdmi_spec_per_pin {
65 hda_nid_t pin_nid; 65 hda_nid_t pin_nid;
66 int num_mux_nids; 66 int num_mux_nids;
67 hda_nid_t mux_nids[HDA_MAX_CONNECTIONS]; 67 hda_nid_t mux_nids[HDA_MAX_CONNECTIONS];
68
69 struct hda_codec *codec;
68 struct hdmi_eld sink_eld; 70 struct hdmi_eld sink_eld;
71 struct delayed_work work;
72 int repoll_count;
69}; 73};
70 74
71struct hdmi_spec { 75struct hdmi_spec {
@@ -745,8 +749,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
745 * Unsolicited events 749 * Unsolicited events
746 */ 750 */
747 751
748static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid, 752static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
749 struct hdmi_eld *eld);
750 753
751static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) 754static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
752{ 755{
@@ -755,7 +758,6 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
755 int pd = !!(res & AC_UNSOL_RES_PD); 758 int pd = !!(res & AC_UNSOL_RES_PD);
756 int eldv = !!(res & AC_UNSOL_RES_ELDV); 759 int eldv = !!(res & AC_UNSOL_RES_ELDV);
757 int pin_idx; 760 int pin_idx;
758 struct hdmi_eld *eld;
759 761
760 printk(KERN_INFO 762 printk(KERN_INFO
761 "HDMI hot plug event: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", 763 "HDMI hot plug event: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
@@ -764,17 +766,8 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
764 pin_idx = pin_nid_to_pin_index(spec, pin_nid); 766 pin_idx = pin_nid_to_pin_index(spec, pin_nid);
765 if (pin_idx < 0) 767 if (pin_idx < 0)
766 return; 768 return;
767 eld = &spec->pins[pin_idx].sink_eld;
768
769 hdmi_present_sense(codec, pin_nid, eld);
770 769
771 /* 770 hdmi_present_sense(&spec->pins[pin_idx], 1);
772 * HDMI sink's ELD info cannot always be retrieved for now, e.g.
773 * in console or for audio devices. Assume the highest speakers
774 * configuration, to _not_ prohibit multi-channel audio playback.
775 */
776 if (!eld->spk_alloc)
777 eld->spk_alloc = 0xffff;
778} 771}
779 772
780static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res) 773static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -968,9 +961,11 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
968 return 0; 961 return 0;
969} 962}
970 963
971static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid, 964static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
972 struct hdmi_eld *eld)
973{ 965{
966 struct hda_codec *codec = per_pin->codec;
967 struct hdmi_eld *eld = &per_pin->sink_eld;
968 hda_nid_t pin_nid = per_pin->pin_nid;
974 /* 969 /*
975 * Always execute a GetPinSense verb here, even when called from 970 * Always execute a GetPinSense verb here, even when called from
976 * hdmi_intrinsic_event; for some NVIDIA HW, the unsolicited 971 * hdmi_intrinsic_event; for some NVIDIA HW, the unsolicited
@@ -980,26 +975,42 @@ static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
980 * the unsolicited response to avoid custom WARs. 975 * the unsolicited response to avoid custom WARs.
981 */ 976 */
982 int present = snd_hda_pin_sense(codec, pin_nid); 977 int present = snd_hda_pin_sense(codec, pin_nid);
978 bool eld_valid = false;
983 979
984 memset(eld, 0, sizeof(*eld)); 980 memset(eld, 0, offsetof(struct hdmi_eld, eld_buffer));
985 981
986 eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE); 982 eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
987 if (eld->monitor_present) 983 if (eld->monitor_present)
988 eld->eld_valid = !!(present & AC_PINSENSE_ELDV); 984 eld_valid = !!(present & AC_PINSENSE_ELDV);
989 else
990 eld->eld_valid = 0;
991 985
992 printk(KERN_INFO 986 printk(KERN_INFO
993 "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", 987 "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
994 codec->addr, pin_nid, eld->monitor_present, eld->eld_valid); 988 codec->addr, pin_nid, eld->monitor_present, eld_valid);
995 989
996 if (eld->eld_valid) 990 if (eld_valid) {
997 if (!snd_hdmi_get_eld(eld, codec, pin_nid)) 991 if (!snd_hdmi_get_eld(eld, codec, pin_nid))
998 snd_hdmi_show_eld(eld); 992 snd_hdmi_show_eld(eld);
993 else if (repoll) {
994 queue_delayed_work(codec->bus->workq,
995 &per_pin->work,
996 msecs_to_jiffies(300));
997 }
998 }
999 999
1000 snd_hda_input_jack_report(codec, pin_nid); 1000 snd_hda_input_jack_report(codec, pin_nid);
1001} 1001}
1002 1002
1003static void hdmi_repoll_eld(struct work_struct *work)
1004{
1005 struct hdmi_spec_per_pin *per_pin =
1006 container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
1007
1008 if (per_pin->repoll_count++ > 6)
1009 per_pin->repoll_count = 0;
1010
1011 hdmi_present_sense(per_pin, per_pin->repoll_count);
1012}
1013
1003static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid) 1014static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
1004{ 1015{
1005 struct hdmi_spec *spec = codec->spec; 1016 struct hdmi_spec *spec = codec->spec;
@@ -1228,7 +1239,7 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
1228 if (err < 0) 1239 if (err < 0)
1229 return err; 1240 return err;
1230 1241
1231 hdmi_present_sense(codec, per_pin->pin_nid, &per_pin->sink_eld); 1242 hdmi_present_sense(per_pin, 0);
1232 return 0; 1243 return 0;
1233} 1244}
1234 1245
@@ -1279,6 +1290,8 @@ static int generic_hdmi_init(struct hda_codec *codec)
1279 AC_VERB_SET_UNSOLICITED_ENABLE, 1290 AC_VERB_SET_UNSOLICITED_ENABLE,
1280 AC_USRSP_EN | pin_nid); 1291 AC_USRSP_EN | pin_nid);
1281 1292
1293 per_pin->codec = codec;
1294 INIT_DELAYED_WORK(&per_pin->work, hdmi_repoll_eld);
1282 snd_hda_eld_proc_new(codec, eld, pin_idx); 1295 snd_hda_eld_proc_new(codec, eld, pin_idx);
1283 } 1296 }
1284 return 0; 1297 return 0;
@@ -1293,10 +1306,12 @@ static void generic_hdmi_free(struct hda_codec *codec)
1293 struct hdmi_spec_per_pin *per_pin = &spec->pins[pin_idx]; 1306 struct hdmi_spec_per_pin *per_pin = &spec->pins[pin_idx];
1294 struct hdmi_eld *eld = &per_pin->sink_eld; 1307 struct hdmi_eld *eld = &per_pin->sink_eld;
1295 1308
1309 cancel_delayed_work(&per_pin->work);
1296 snd_hda_eld_proc_free(codec, eld); 1310 snd_hda_eld_proc_free(codec, eld);
1297 } 1311 }
1298 snd_hda_input_jack_free(codec); 1312 snd_hda_input_jack_free(codec);
1299 1313
1314 flush_workqueue(codec->bus->workq);
1300 kfree(spec); 1315 kfree(spec);
1301} 1316}
1302 1317
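
The patch_hdmi.c hunks replace the old "assume a 0xffff speaker mask when the ELD cannot be read" fallback with a delayed-work re-poll: on a failed read the pin reschedules itself after 300 ms, a bounded number of times, and the work is cancelled and the bus workqueue flushed on free. The memset() now clears only the fields above eld_buffer (via offsetof) so previously read ELD bytes survive a failed refresh. A simplified, self-contained sketch of the re-poll machinery — eld_poller and try_read_eld() are illustrative names, not the driver's:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    struct eld_poller {
        struct delayed_work work;
        int repoll_count;
    };

    /* stub for the real ELD read: pretend the sink is never ready */
    static bool try_read_eld(struct eld_poller *p)
    {
        return false;
    }

    static void eld_poll(struct eld_poller *p, int repoll)
    {
        if (try_read_eld(p))
            return;                 /* got the ELD, done */
        if (repoll)                 /* sink not ready: retry in 300 ms */
            schedule_delayed_work(&p->work, msecs_to_jiffies(300));
    }

    static void eld_repoll_fn(struct work_struct *work)
    {
        struct eld_poller *p =
            container_of(to_delayed_work(work), struct eld_poller, work);

        if (p->repoll_count++ > 6)  /* give up after a handful of retries */
            p->repoll_count = 0;
        eld_poll(p, p->repoll_count);
    }

    /* setup:    INIT_DELAYED_WORK(&p->work, eld_repoll_fn);
     * teardown: cancel_delayed_work_sync(&p->work); */

Storing a back-pointer to the codec in each per-pin struct (per_pin->codec) is what makes the container_of() round trip sufficient inside the work callback.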
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a24e068a021b..1d07e8fa2433 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -277,6 +277,12 @@ static bool alc_dyn_adc_pcm_resetup(struct hda_codec *codec, int cur)
277 return false; 277 return false;
278} 278}
279 279
280static inline hda_nid_t get_capsrc(struct alc_spec *spec, int idx)
281{
282 return spec->capsrc_nids ?
283 spec->capsrc_nids[idx] : spec->adc_nids[idx];
284}
285
280/* select the given imux item; either unmute exclusively or select the route */ 286/* select the given imux item; either unmute exclusively or select the route */
281static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx, 287static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
282 unsigned int idx, bool force) 288 unsigned int idx, bool force)
@@ -284,13 +290,15 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
284 struct alc_spec *spec = codec->spec; 290 struct alc_spec *spec = codec->spec;
285 const struct hda_input_mux *imux; 291 const struct hda_input_mux *imux;
286 unsigned int mux_idx; 292 unsigned int mux_idx;
287 int i, type; 293 int i, type, num_conns;
288 hda_nid_t nid; 294 hda_nid_t nid;
289 295
290 mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx; 296 mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
291 imux = &spec->input_mux[mux_idx]; 297 imux = &spec->input_mux[mux_idx];
292 if (!imux->num_items && mux_idx > 0) 298 if (!imux->num_items && mux_idx > 0)
293 imux = &spec->input_mux[0]; 299 imux = &spec->input_mux[0];
300 if (!imux->num_items)
301 return 0;
294 302
295 if (idx >= imux->num_items) 303 if (idx >= imux->num_items)
296 idx = imux->num_items - 1; 304 idx = imux->num_items - 1;
@@ -303,20 +311,20 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
303 adc_idx = spec->dyn_adc_idx[idx]; 311 adc_idx = spec->dyn_adc_idx[idx];
304 } 312 }
305 313
306 nid = spec->capsrc_nids ? 314 nid = get_capsrc(spec, adc_idx);
307 spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx];
308 315
309 /* no selection? */ 316 /* no selection? */
310 if (snd_hda_get_conn_list(codec, nid, NULL) <= 1) 317 num_conns = snd_hda_get_conn_list(codec, nid, NULL);
318 if (num_conns <= 1)
311 return 1; 319 return 1;
312 320
313 type = get_wcaps_type(get_wcaps(codec, nid)); 321 type = get_wcaps_type(get_wcaps(codec, nid));
314 if (type == AC_WID_AUD_MIX) { 322 if (type == AC_WID_AUD_MIX) {
315 /* Matrix-mixer style (e.g. ALC882) */ 323 /* Matrix-mixer style (e.g. ALC882) */
316 for (i = 0; i < imux->num_items; i++) { 324 int active = imux->items[idx].index;
317 unsigned int v = (i == idx) ? 0 : HDA_AMP_MUTE; 325 for (i = 0; i < num_conns; i++) {
318 snd_hda_codec_amp_stereo(codec, nid, HDA_INPUT, 326 unsigned int v = (i == active) ? 0 : HDA_AMP_MUTE;
319 imux->items[i].index, 327 snd_hda_codec_amp_stereo(codec, nid, HDA_INPUT, i,
320 HDA_AMP_MUTE, v); 328 HDA_AMP_MUTE, v);
321 } 329 }
322 } else { 330 } else {
@@ -1053,8 +1061,19 @@ static bool alc_rebuild_imux_for_auto_mic(struct hda_codec *codec)
1053 spec->imux_pins[2] = spec->dock_mic_pin; 1061 spec->imux_pins[2] = spec->dock_mic_pin;
1054 for (i = 0; i < 3; i++) { 1062 for (i = 0; i < 3; i++) {
1055 strcpy(imux->items[i].label, texts[i]); 1063 strcpy(imux->items[i].label, texts[i]);
1056 if (spec->imux_pins[i]) 1064 if (spec->imux_pins[i]) {
1065 hda_nid_t pin = spec->imux_pins[i];
1066 int c;
1067 for (c = 0; c < spec->num_adc_nids; c++) {
1068 hda_nid_t cap = get_capsrc(spec, c);
1069 int idx = get_connection_index(codec, cap, pin);
1070 if (idx >= 0) {
1071 imux->items[i].index = idx;
1072 break;
1073 }
1074 }
1057 imux->num_items = i + 1; 1075 imux->num_items = i + 1;
1076 }
1058 } 1077 }
1059 spec->num_mux_defs = 1; 1078 spec->num_mux_defs = 1;
1060 spec->input_mux = imux; 1079 spec->input_mux = imux;
@@ -1451,7 +1470,7 @@ static void alc_apply_fixup(struct hda_codec *codec, int action)
1451 switch (fix->type) { 1470 switch (fix->type) {
1452 case ALC_FIXUP_SKU: 1471 case ALC_FIXUP_SKU:
1453 if (action != ALC_FIXUP_ACT_PRE_PROBE || !fix->v.sku) 1472 if (action != ALC_FIXUP_ACT_PRE_PROBE || !fix->v.sku)
1454 break;; 1473 break;
1455 snd_printdd(KERN_INFO "hda_codec: %s: " 1474 snd_printdd(KERN_INFO "hda_codec: %s: "
1456 "Apply sku override for %s\n", 1475 "Apply sku override for %s\n",
1457 codec->chip_name, modelname); 1476 codec->chip_name, modelname);
@@ -1956,10 +1975,8 @@ static int alc_build_controls(struct hda_codec *codec)
1956 if (!kctl) 1975 if (!kctl)
1957 kctl = snd_hda_find_mixer_ctl(codec, "Input Source"); 1976 kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
1958 for (i = 0; kctl && i < kctl->count; i++) { 1977 for (i = 0; kctl && i < kctl->count; i++) {
1959 const hda_nid_t *nids = spec->capsrc_nids; 1978 err = snd_hda_add_nid(codec, kctl, i,
1960 if (!nids) 1979 get_capsrc(spec, i));
1961 nids = spec->adc_nids;
1962 err = snd_hda_add_nid(codec, kctl, i, nids[i]);
1963 if (err < 0) 1980 if (err < 0)
1964 return err; 1981 return err;
1965 } 1982 }
@@ -2614,6 +2631,8 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
2614 case AUTO_PIN_SPEAKER_OUT: 2631 case AUTO_PIN_SPEAKER_OUT:
2615 if (cfg->line_outs == 1) 2632 if (cfg->line_outs == 1)
2616 return "Speaker"; 2633 return "Speaker";
2634 if (cfg->line_outs == 2)
2635 return ch ? "Bass Speaker" : "Speaker";
2617 break; 2636 break;
2618 case AUTO_PIN_HP_OUT: 2637 case AUTO_PIN_HP_OUT:
2619 /* for multi-io case, only the primary out */ 2638 /* for multi-io case, only the primary out */
@@ -2746,8 +2765,7 @@ static int alc_auto_create_input_ctls(struct hda_codec *codec)
2746 } 2765 }
2747 2766
2748 for (c = 0; c < num_adcs; c++) { 2767 for (c = 0; c < num_adcs; c++) {
2749 hda_nid_t cap = spec->capsrc_nids ? 2768 hda_nid_t cap = get_capsrc(spec, c);
2750 spec->capsrc_nids[c] : spec->adc_nids[c];
2751 idx = get_connection_index(codec, cap, pin); 2769 idx = get_connection_index(codec, cap, pin);
2752 if (idx >= 0) { 2770 if (idx >= 0) {
2753 spec->imux_pins[imux->num_items] = pin; 2771 spec->imux_pins[imux->num_items] = pin;
@@ -2888,7 +2906,7 @@ static hda_nid_t alc_auto_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
2888 if (!nid) 2906 if (!nid)
2889 continue; 2907 continue;
2890 if (found_in_nid_list(nid, spec->multiout.dac_nids, 2908 if (found_in_nid_list(nid, spec->multiout.dac_nids,
2891 spec->multiout.num_dacs)) 2909 ARRAY_SIZE(spec->private_dac_nids)))
2892 continue; 2910 continue;
2893 if (found_in_nid_list(nid, spec->multiout.hp_out_nid, 2911 if (found_in_nid_list(nid, spec->multiout.hp_out_nid,
2894 ARRAY_SIZE(spec->multiout.hp_out_nid))) 2912 ARRAY_SIZE(spec->multiout.hp_out_nid)))
@@ -2909,6 +2927,7 @@ static hda_nid_t get_dac_if_single(struct hda_codec *codec, hda_nid_t pin)
2909 return 0; 2927 return 0;
2910} 2928}
2911 2929
2930/* return 0 if no possible DAC is found, 1 if one or more found */
2912static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs, 2931static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
2913 const hda_nid_t *pins, hda_nid_t *dacs) 2932 const hda_nid_t *pins, hda_nid_t *dacs)
2914{ 2933{
@@ -2926,7 +2945,7 @@ static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
2926 if (!dacs[i]) 2945 if (!dacs[i])
2927 dacs[i] = alc_auto_look_for_dac(codec, pins[i]); 2946 dacs[i] = alc_auto_look_for_dac(codec, pins[i]);
2928 } 2947 }
2929 return 0; 2948 return 1;
2930} 2949}
2931 2950
2932static int alc_auto_fill_multi_ios(struct hda_codec *codec, 2951static int alc_auto_fill_multi_ios(struct hda_codec *codec,
@@ -2936,7 +2955,7 @@ static int alc_auto_fill_multi_ios(struct hda_codec *codec,
2936static int alc_auto_fill_dac_nids(struct hda_codec *codec) 2955static int alc_auto_fill_dac_nids(struct hda_codec *codec)
2937{ 2956{
2938 struct alc_spec *spec = codec->spec; 2957 struct alc_spec *spec = codec->spec;
2939 const struct auto_pin_cfg *cfg = &spec->autocfg; 2958 struct auto_pin_cfg *cfg = &spec->autocfg;
2940 bool redone = false; 2959 bool redone = false;
2941 int i; 2960 int i;
2942 2961
@@ -2947,6 +2966,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
2947 spec->multiout.extra_out_nid[0] = 0; 2966 spec->multiout.extra_out_nid[0] = 0;
2948 memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids)); 2967 memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
2949 spec->multiout.dac_nids = spec->private_dac_nids; 2968 spec->multiout.dac_nids = spec->private_dac_nids;
2969 spec->multi_ios = 0;
2950 2970
2951 /* fill hard-wired DACs first */ 2971 /* fill hard-wired DACs first */
2952 if (!redone) { 2972 if (!redone) {
@@ -2980,10 +3000,12 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
2980 for (i = 0; i < cfg->line_outs; i++) { 3000 for (i = 0; i < cfg->line_outs; i++) {
2981 if (spec->private_dac_nids[i]) 3001 if (spec->private_dac_nids[i])
2982 spec->multiout.num_dacs++; 3002 spec->multiout.num_dacs++;
2983 else 3003 else {
2984 memmove(spec->private_dac_nids + i, 3004 memmove(spec->private_dac_nids + i,
2985 spec->private_dac_nids + i + 1, 3005 spec->private_dac_nids + i + 1,
2986 sizeof(hda_nid_t) * (cfg->line_outs - i - 1)); 3006 sizeof(hda_nid_t) * (cfg->line_outs - i - 1));
3007 spec->private_dac_nids[cfg->line_outs - 1] = 0;
3008 }
2987 } 3009 }
2988 3010
2989 if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) { 3011 if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
@@ -3005,9 +3027,28 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
3005 if (cfg->line_out_type != AUTO_PIN_HP_OUT) 3027 if (cfg->line_out_type != AUTO_PIN_HP_OUT)
3006 alc_auto_fill_extra_dacs(codec, cfg->hp_outs, cfg->hp_pins, 3028 alc_auto_fill_extra_dacs(codec, cfg->hp_outs, cfg->hp_pins,
3007 spec->multiout.hp_out_nid); 3029 spec->multiout.hp_out_nid);
3008 if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) 3030 if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
3009 alc_auto_fill_extra_dacs(codec, cfg->speaker_outs, cfg->speaker_pins, 3031 int err = alc_auto_fill_extra_dacs(codec, cfg->speaker_outs,
3010 spec->multiout.extra_out_nid); 3032 cfg->speaker_pins,
3033 spec->multiout.extra_out_nid);
3034 /* if no speaker volume is assigned, try again as the primary
3035 * output
3036 */
3037 if (!err && cfg->speaker_outs > 0 &&
3038 cfg->line_out_type == AUTO_PIN_HP_OUT) {
3039 cfg->hp_outs = cfg->line_outs;
3040 memcpy(cfg->hp_pins, cfg->line_out_pins,
3041 sizeof(cfg->hp_pins));
3042 cfg->line_outs = cfg->speaker_outs;
3043 memcpy(cfg->line_out_pins, cfg->speaker_pins,
3044 sizeof(cfg->speaker_pins));
3045 cfg->speaker_outs = 0;
3046 memset(cfg->speaker_pins, 0, sizeof(cfg->speaker_pins));
3047 cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
3048 redone = false;
3049 goto again;
3050 }
3051 }
3011 3052
3012 return 0; 3053 return 0;
3013} 3054}
@@ -3157,7 +3198,8 @@ static int alc_auto_create_multi_out_ctls(struct hda_codec *codec,
3157} 3198}
3158 3199
3159static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin, 3200static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
3160 hda_nid_t dac, const char *pfx) 3201 hda_nid_t dac, const char *pfx,
3202 int cidx)
3161{ 3203{
3162 struct alc_spec *spec = codec->spec; 3204 struct alc_spec *spec = codec->spec;
3163 hda_nid_t sw, vol; 3205 hda_nid_t sw, vol;
@@ -3173,15 +3215,15 @@ static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
3173 if (is_ctl_used(spec->sw_ctls, val)) 3215 if (is_ctl_used(spec->sw_ctls, val))
3174 return 0; /* already created */ 3216 return 0; /* already created */
3175 mark_ctl_usage(spec->sw_ctls, val); 3217 mark_ctl_usage(spec->sw_ctls, val);
3176 return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, val); 3218 return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, cidx, val);
3177 } 3219 }
3178 3220
3179 sw = alc_look_for_out_mute_nid(codec, pin, dac); 3221 sw = alc_look_for_out_mute_nid(codec, pin, dac);
3180 vol = alc_look_for_out_vol_nid(codec, pin, dac); 3222 vol = alc_look_for_out_vol_nid(codec, pin, dac);
3181 err = alc_auto_add_stereo_vol(codec, pfx, 0, vol); 3223 err = alc_auto_add_stereo_vol(codec, pfx, cidx, vol);
3182 if (err < 0) 3224 if (err < 0)
3183 return err; 3225 return err;
3184 err = alc_auto_add_stereo_sw(codec, pfx, 0, sw); 3226 err = alc_auto_add_stereo_sw(codec, pfx, cidx, sw);
3185 if (err < 0) 3227 if (err < 0)
3186 return err; 3228 return err;
3187 return 0; 3229 return 0;
@@ -3222,16 +3264,21 @@ static int alc_auto_create_extra_outs(struct hda_codec *codec, int num_pins,
3222 hda_nid_t dac = *dacs; 3264 hda_nid_t dac = *dacs;
3223 if (!dac) 3265 if (!dac)
3224 dac = spec->multiout.dac_nids[0]; 3266 dac = spec->multiout.dac_nids[0];
3225 return alc_auto_create_extra_out(codec, *pins, dac, pfx); 3267 return alc_auto_create_extra_out(codec, *pins, dac, pfx, 0);
3226 } 3268 }
3227 3269
3228 if (dacs[num_pins - 1]) { 3270 if (dacs[num_pins - 1]) {
3229 /* OK, we have a multi-output system with individual volumes */ 3271 /* OK, we have a multi-output system with individual volumes */
3230 for (i = 0; i < num_pins; i++) { 3272 for (i = 0; i < num_pins; i++) {
3231 snprintf(name, sizeof(name), "%s %s", 3273 if (num_pins >= 3) {
3232 pfx, channel_name[i]); 3274 snprintf(name, sizeof(name), "%s %s",
3233 err = alc_auto_create_extra_out(codec, pins[i], dacs[i], 3275 pfx, channel_name[i]);
3234 name); 3276 err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
3277 name, 0);
3278 } else {
3279 err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
3280 pfx, i);
3281 }
3235 if (err < 0) 3282 if (err < 0)
3236 return err; 3283 return err;
3237 } 3284 }
@@ -3693,8 +3740,7 @@ static int init_capsrc_for_pin(struct hda_codec *codec, hda_nid_t pin)
3693 if (!pin) 3740 if (!pin)
3694 return 0; 3741 return 0;
3695 for (i = 0; i < spec->num_adc_nids; i++) { 3742 for (i = 0; i < spec->num_adc_nids; i++) {
3696 hda_nid_t cap = spec->capsrc_nids ? 3743 hda_nid_t cap = get_capsrc(spec, i);
3697 spec->capsrc_nids[i] : spec->adc_nids[i];
3698 int idx; 3744 int idx;
3699 3745
3700 idx = get_connection_index(codec, cap, pin); 3746 idx = get_connection_index(codec, cap, pin);
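
Several patch_realtek.c hunks replace the repeated "capsrc_nids ? capsrc_nids[i] : adc_nids[i]" ternary with a get_capsrc() helper, and the matrix-mixer selection now walks every connection of the capture source (num_conns) rather than only the imux items, muting all but the active index. Both ideas in one compact sketch; the types and the set_mute callback are stand-ins, not the HDA API:

    typedef unsigned short nid_t;           /* stand-in for hda_nid_t */

    struct spec {
        const nid_t *capsrc_nids;           /* optional dedicated capture-source widgets */
        const nid_t *adc_nids;              /* the ADCs themselves */
    };

    /* one helper instead of repeating the "fall back to the ADC" ternary */
    static inline nid_t get_capsrc(const struct spec *s, int idx)
    {
        return s->capsrc_nids ? s->capsrc_nids[idx] : s->adc_nids[idx];
    }

    /* matrix-mixer style selection: every input stays connected, so choosing
     * input 'active' means unmuting that connection and muting the rest */
    static void select_matrix_input(int num_conns, int active,
                                    void (*set_mute)(int conn, int mute))
    {
        int i;

        for (i = 0; i < num_conns; i++)
            set_mute(i, i != active);
    }

The DAC-assignment hunk also retries with the speaker pins promoted to primary outputs (goto again) when no dedicated speaker DAC could be found while the line-out type is headphone.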
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 4e715fefebef..616678fde486 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -95,6 +95,7 @@ enum {
95 STAC_92HD83XXX_REF, 95 STAC_92HD83XXX_REF,
96 STAC_92HD83XXX_PWR_REF, 96 STAC_92HD83XXX_PWR_REF,
97 STAC_DELL_S14, 97 STAC_DELL_S14,
98 STAC_DELL_VOSTRO_3500,
98 STAC_92HD83XXX_HP, 99 STAC_92HD83XXX_HP,
99 STAC_92HD83XXX_HP_cNB11_INTQUAD, 100 STAC_92HD83XXX_HP_cNB11_INTQUAD,
100 STAC_HP_DV7_4000, 101 STAC_HP_DV7_4000,
@@ -214,6 +215,7 @@ struct sigmatel_spec {
214 unsigned int gpio_mute; 215 unsigned int gpio_mute;
215 unsigned int gpio_led; 216 unsigned int gpio_led;
216 unsigned int gpio_led_polarity; 217 unsigned int gpio_led_polarity;
218 unsigned int vref_mute_led_nid; /* pin NID for mute-LED vref control */
217 unsigned int vref_led; 219 unsigned int vref_led;
218 220
219 /* stream */ 221 /* stream */
@@ -226,7 +228,6 @@ struct sigmatel_spec {
226 228
227 /* power management */ 229 /* power management */
228 unsigned int num_pwrs; 230 unsigned int num_pwrs;
229 const unsigned int *pwr_mapping;
230 const hda_nid_t *pwr_nids; 231 const hda_nid_t *pwr_nids;
231 const hda_nid_t *dac_list; 232 const hda_nid_t *dac_list;
232 233
@@ -373,18 +374,15 @@ static const unsigned long stac92hd73xx_capvols[] = {
373 374
374#define STAC92HD83_DAC_COUNT 3 375#define STAC92HD83_DAC_COUNT 3
375 376
376static const hda_nid_t stac92hd83xxx_pwr_nids[4] = { 377static const hda_nid_t stac92hd83xxx_pwr_nids[7] = {
377 0xa, 0xb, 0xd, 0xe, 378 0x0a, 0x0b, 0x0c, 0xd, 0x0e,
379 0x0f, 0x10
378}; 380};
379 381
380static const hda_nid_t stac92hd83xxx_slave_dig_outs[2] = { 382static const hda_nid_t stac92hd83xxx_slave_dig_outs[2] = {
381 0x1e, 0, 383 0x1e, 0,
382}; 384};
383 385
384static const unsigned int stac92hd83xxx_pwr_mapping[4] = {
385 0x03, 0x0c, 0x20, 0x40,
386};
387
388static const hda_nid_t stac92hd83xxx_dmic_nids[] = { 386static const hda_nid_t stac92hd83xxx_dmic_nids[] = {
389 0x11, 0x20, 387 0x11, 0x20,
390}; 388};
@@ -1644,6 +1642,8 @@ static const struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
1644 "Alienware M17x", STAC_ALIENWARE_M17X), 1642 "Alienware M17x", STAC_ALIENWARE_M17X),
1645 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a, 1643 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
1646 "Alienware M17x", STAC_ALIENWARE_M17X), 1644 "Alienware M17x", STAC_ALIENWARE_M17X),
1645 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
1646 "Alienware M17x", STAC_ALIENWARE_M17X),
1647 {} /* terminator */ 1647 {} /* terminator */
1648}; 1648};
1649 1649
@@ -1659,6 +1659,12 @@ static const unsigned int dell_s14_pin_configs[10] = {
1659 0x40f000f0, 0x40f000f0, 1659 0x40f000f0, 0x40f000f0,
1660}; 1660};
1661 1661
1662static const unsigned int dell_vostro_3500_pin_configs[10] = {
1663 0x02a11020, 0x0221101f, 0x400000f0, 0x90170110,
1664 0x400000f1, 0x400000f2, 0x400000f3, 0x90a60160,
1665 0x400000f4, 0x400000f5,
1666};
1667
1662static const unsigned int hp_dv7_4000_pin_configs[10] = { 1668static const unsigned int hp_dv7_4000_pin_configs[10] = {
1663 0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110, 1669 0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110,
1664 0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140, 1670 0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140,
@@ -1675,6 +1681,7 @@ static const unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = {
1675 [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs, 1681 [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs,
1676 [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs, 1682 [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs,
1677 [STAC_DELL_S14] = dell_s14_pin_configs, 1683 [STAC_DELL_S14] = dell_s14_pin_configs,
1684 [STAC_DELL_VOSTRO_3500] = dell_vostro_3500_pin_configs,
1678 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = hp_cNB11_intquad_pin_configs, 1685 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = hp_cNB11_intquad_pin_configs,
1679 [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs, 1686 [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs,
1680}; 1687};
@@ -1684,6 +1691,7 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
1684 [STAC_92HD83XXX_REF] = "ref", 1691 [STAC_92HD83XXX_REF] = "ref",
1685 [STAC_92HD83XXX_PWR_REF] = "mic-ref", 1692 [STAC_92HD83XXX_PWR_REF] = "mic-ref",
1686 [STAC_DELL_S14] = "dell-s14", 1693 [STAC_DELL_S14] = "dell-s14",
1694 [STAC_DELL_VOSTRO_3500] = "dell-vostro-3500",
1687 [STAC_92HD83XXX_HP] = "hp", 1695 [STAC_92HD83XXX_HP] = "hp",
1688 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad", 1696 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad",
1689 [STAC_HP_DV7_4000] = "hp-dv7-4000", 1697 [STAC_HP_DV7_4000] = "hp-dv7-4000",
@@ -1697,6 +1705,8 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
1697 "DFI LanParty", STAC_92HD83XXX_REF), 1705 "DFI LanParty", STAC_92HD83XXX_REF),
1698 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba, 1706 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba,
1699 "unknown Dell", STAC_DELL_S14), 1707 "unknown Dell", STAC_DELL_S14),
1708 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x1028,
1709 "Dell Vostro 3500", STAC_DELL_VOSTRO_3500),
1700 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600, 1710 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600,
1701 "HP", STAC_92HD83XXX_HP), 1711 "HP", STAC_92HD83XXX_HP),
1702 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1656, 1712 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1656,
@@ -4309,12 +4319,10 @@ static void stac_store_hints(struct hda_codec *codec)
4309 spec->eapd_switch = val; 4319 spec->eapd_switch = val;
4310 get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity); 4320 get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity);
4311 if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) { 4321 if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) {
4312 if (spec->gpio_led <= 8) { 4322 spec->gpio_mask |= spec->gpio_led;
4313 spec->gpio_mask |= spec->gpio_led; 4323 spec->gpio_dir |= spec->gpio_led;
4314 spec->gpio_dir |= spec->gpio_led; 4324 if (spec->gpio_led_polarity)
4315 if (spec->gpio_led_polarity) 4325 spec->gpio_data |= spec->gpio_led;
4316 spec->gpio_data |= spec->gpio_led;
4317 }
4318 } 4326 }
4319} 4327}
4320 4328
@@ -4432,7 +4440,9 @@ static int stac92xx_init(struct hda_codec *codec)
4432 int pinctl, def_conf; 4440 int pinctl, def_conf;
4433 4441
4434 /* power on when no jack detection is available */ 4442 /* power on when no jack detection is available */
4435 if (!spec->hp_detect) { 4443 /* or when the VREF is used for controlling LED */
4444 if (!spec->hp_detect ||
4445 spec->vref_mute_led_nid == nid) {
4436 stac_toggle_power_map(codec, nid, 1); 4446 stac_toggle_power_map(codec, nid, 1);
4437 continue; 4447 continue;
4438 } 4448 }
@@ -4459,8 +4469,12 @@ static int stac92xx_init(struct hda_codec *codec)
4459 stac_toggle_power_map(codec, nid, 1); 4469 stac_toggle_power_map(codec, nid, 1);
4460 continue; 4470 continue;
4461 } 4471 }
4462 if (enable_pin_detect(codec, nid, STAC_PWR_EVENT)) 4472 if (enable_pin_detect(codec, nid, STAC_PWR_EVENT)) {
4463 stac_issue_unsol_event(codec, nid); 4473 stac_issue_unsol_event(codec, nid);
4474 continue;
4475 }
4476 /* none of the above, turn the port OFF */
4477 stac_toggle_power_map(codec, nid, 0);
4464 } 4478 }
4465 4479
4466 /* sync mute LED */ 4480 /* sync mute LED */
@@ -4716,11 +4730,7 @@ static void stac_toggle_power_map(struct hda_codec *codec, hda_nid_t nid,
4716 if (idx >= spec->num_pwrs) 4730 if (idx >= spec->num_pwrs)
4717 return; 4731 return;
4718 4732
4719 /* several codecs have two power down bits */ 4733 idx = 1 << idx;
4720 if (spec->pwr_mapping)
4721 idx = spec->pwr_mapping[idx];
4722 else
4723 idx = 1 << idx;
4724 4734
4725 val = snd_hda_codec_read(codec, codec->afg, 0, 0x0fec, 0x0) & 0xff; 4735 val = snd_hda_codec_read(codec, codec->afg, 0, 0x0fec, 0x0) & 0xff;
4726 if (enable) 4736 if (enable)
@@ -4904,8 +4914,14 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
4904 if (sscanf(dev->name, "HP_Mute_LED_%d_%x", 4914 if (sscanf(dev->name, "HP_Mute_LED_%d_%x",
4905 &spec->gpio_led_polarity, 4915 &spec->gpio_led_polarity,
4906 &spec->gpio_led) == 2) { 4916 &spec->gpio_led) == 2) {
4907 if (spec->gpio_led < 4) 4917 unsigned int max_gpio;
4918 max_gpio = snd_hda_param_read(codec, codec->afg,
4919 AC_PAR_GPIO_CAP);
4920 max_gpio &= AC_GPIO_IO_COUNT;
4921 if (spec->gpio_led < max_gpio)
4908 spec->gpio_led = 1 << spec->gpio_led; 4922 spec->gpio_led = 1 << spec->gpio_led;
4923 else
4924 spec->vref_mute_led_nid = spec->gpio_led;
4909 return 1; 4925 return 1;
4910 } 4926 }
4911 if (sscanf(dev->name, "HP_Mute_LED_%d", 4927 if (sscanf(dev->name, "HP_Mute_LED_%d",
@@ -4913,6 +4929,12 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
4913 set_hp_led_gpio(codec); 4929 set_hp_led_gpio(codec);
4914 return 1; 4930 return 1;
4915 } 4931 }
4932 /* BIOS bug: unfilled OEM string */
4933 if (strstr(dev->name, "HP_Mute_LED_P_G")) {
4934 set_hp_led_gpio(codec);
4935 spec->gpio_led_polarity = 1;
4936 return 1;
4937 }
4916 } 4938 }
4917 4939
4918 /* 4940 /*
@@ -5034,29 +5056,12 @@ static int stac92xx_pre_resume(struct hda_codec *codec)
5034 struct sigmatel_spec *spec = codec->spec; 5056 struct sigmatel_spec *spec = codec->spec;
5035 5057
5036 /* sync mute LED */ 5058 /* sync mute LED */
5037 if (spec->gpio_led) { 5059 if (spec->vref_mute_led_nid)
5038 if (spec->gpio_led <= 8) { 5060 stac_vrefout_set(codec, spec->vref_mute_led_nid,
5039 stac_gpio_set(codec, spec->gpio_mask, 5061 spec->vref_led);
5040 spec->gpio_dir, spec->gpio_data); 5062 else if (spec->gpio_led)
5041 } else { 5063 stac_gpio_set(codec, spec->gpio_mask,
5042 stac_vrefout_set(codec, 5064 spec->gpio_dir, spec->gpio_data);
5043 spec->gpio_led, spec->vref_led);
5044 }
5045 }
5046 return 0;
5047}
5048
5049static int stac92xx_post_suspend(struct hda_codec *codec)
5050{
5051 struct sigmatel_spec *spec = codec->spec;
5052 if (spec->gpio_led > 8) {
5053 /* with vref-out pin used for mute led control
5054 * codec AFG is prevented from D3 state, but on
5055 * system suspend it can (and should) be used
5056 */
5057 snd_hda_codec_read(codec, codec->afg, 0,
5058 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
5059 }
5060 return 0; 5065 return 0;
5061} 5066}
5062 5067
@@ -5067,7 +5072,7 @@ static void stac92xx_set_power_state(struct hda_codec *codec, hda_nid_t fg,
5067 struct sigmatel_spec *spec = codec->spec; 5072 struct sigmatel_spec *spec = codec->spec;
5068 5073
5069 if (power_state == AC_PWRST_D3) { 5074 if (power_state == AC_PWRST_D3) {
5070 if (spec->gpio_led > 8) { 5075 if (spec->vref_mute_led_nid) {
5071 /* with vref-out pin used for mute led control 5076 /* with vref-out pin used for mute led control
5072 * codec AFG is prevented from D3 state 5077 * codec AFG is prevented from D3 state
5073 */ 5078 */
@@ -5120,7 +5125,7 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
5120 } 5125 }
5121 } 5126 }
5122 /*polarity defines *not* muted state level*/ 5127 /*polarity defines *not* muted state level*/
5123 if (spec->gpio_led <= 8) { 5128 if (!spec->vref_mute_led_nid) {
5124 if (muted) 5129 if (muted)
5125 spec->gpio_data &= ~spec->gpio_led; /* orange */ 5130 spec->gpio_data &= ~spec->gpio_led; /* orange */
5126 else 5131 else
@@ -5138,7 +5143,8 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
5138 muted_lvl = spec->gpio_led_polarity ? 5143 muted_lvl = spec->gpio_led_polarity ?
5139 AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ; 5144 AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ;
5140 spec->vref_led = muted ? muted_lvl : notmtd_lvl; 5145 spec->vref_led = muted ? muted_lvl : notmtd_lvl;
5141 stac_vrefout_set(codec, spec->gpio_led, spec->vref_led); 5146 stac_vrefout_set(codec, spec->vref_mute_led_nid,
5147 spec->vref_led);
5142 } 5148 }
5143 return 0; 5149 return 0;
5144} 5150}
@@ -5618,9 +5624,6 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
5618 snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e); 5624 snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e);
5619 } 5625 }
5620 5626
5621 /* reset pin power-down; Windows may leave these bits after reboot */
5622 snd_hda_codec_write_cache(codec, codec->afg, 0, 0x7EC, 0);
5623 snd_hda_codec_write_cache(codec, codec->afg, 0, 0x7ED, 0);
5624 codec->no_trigger_sense = 1; 5627 codec->no_trigger_sense = 1;
5625 codec->spec = spec; 5628 codec->spec = spec;
5626 5629
@@ -5630,7 +5633,6 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
5630 codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs; 5633 codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs;
5631 spec->digbeep_nid = 0x21; 5634 spec->digbeep_nid = 0x21;
5632 spec->pwr_nids = stac92hd83xxx_pwr_nids; 5635 spec->pwr_nids = stac92hd83xxx_pwr_nids;
5633 spec->pwr_mapping = stac92hd83xxx_pwr_mapping;
5634 spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids); 5636 spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids);
5635 spec->multiout.dac_nids = spec->dac_nids; 5637 spec->multiout.dac_nids = spec->dac_nids;
5636 spec->init = stac92hd83xxx_core_init; 5638 spec->init = stac92hd83xxx_core_init;
@@ -5647,9 +5649,6 @@ again:
5647 stac92xx_set_config_regs(codec, 5649 stac92xx_set_config_regs(codec,
5648 stac92hd83xxx_brd_tbl[spec->board_config]); 5650 stac92hd83xxx_brd_tbl[spec->board_config]);
5649 5651
5650 if (spec->board_config != STAC_92HD83XXX_PWR_REF)
5651 spec->num_pwrs = 0;
5652
5653 codec->patch_ops = stac92xx_patch_ops; 5652 codec->patch_ops = stac92xx_patch_ops;
5654 5653
5655 if (find_mute_led_gpio(codec, 0)) 5654 if (find_mute_led_gpio(codec, 0))
@@ -5659,15 +5658,13 @@ again:
5659 5658
5660#ifdef CONFIG_SND_HDA_POWER_SAVE 5659#ifdef CONFIG_SND_HDA_POWER_SAVE
5661 if (spec->gpio_led) { 5660 if (spec->gpio_led) {
5662 if (spec->gpio_led <= 8) { 5661 if (!spec->vref_mute_led_nid) {
5663 spec->gpio_mask |= spec->gpio_led; 5662 spec->gpio_mask |= spec->gpio_led;
5664 spec->gpio_dir |= spec->gpio_led; 5663 spec->gpio_dir |= spec->gpio_led;
5665 spec->gpio_data |= spec->gpio_led; 5664 spec->gpio_data |= spec->gpio_led;
5666 } else { 5665 } else {
5667 codec->patch_ops.set_power_state = 5666 codec->patch_ops.set_power_state =
5668 stac92xx_set_power_state; 5667 stac92xx_set_power_state;
5669 codec->patch_ops.post_suspend =
5670 stac92xx_post_suspend;
5671 } 5668 }
5672 codec->patch_ops.pre_resume = stac92xx_pre_resume; 5669 codec->patch_ops.pre_resume = stac92xx_pre_resume;
5673 codec->patch_ops.check_power_status = 5670 codec->patch_ops.check_power_status =
@@ -5858,8 +5855,6 @@ again:
5858 (codec->revision_id & 0xf) == 1) 5855 (codec->revision_id & 0xf) == 1)
5859 spec->stream_delay = 40; /* 40 milliseconds */ 5856 spec->stream_delay = 40; /* 40 milliseconds */
5860 5857
5861 /* no output amps */
5862 spec->num_pwrs = 0;
5863 /* disable VSW */ 5858 /* disable VSW */
5864 spec->init = stac92hd71bxx_core_init; 5859 spec->init = stac92hd71bxx_core_init;
5865 unmute_init++; 5860 unmute_init++;
@@ -5874,8 +5869,6 @@ again:
5874 if ((codec->revision_id & 0xf) == 1) 5869 if ((codec->revision_id & 0xf) == 1)
5875 spec->stream_delay = 40; /* 40 milliseconds */ 5870 spec->stream_delay = 40; /* 40 milliseconds */
5876 5871
5877 /* no output amps */
5878 spec->num_pwrs = 0;
5879 /* fallthru */ 5872 /* fallthru */
5880 default: 5873 default:
5881 spec->init = stac92hd71bxx_core_init; 5874 spec->init = stac92hd71bxx_core_init;
@@ -5978,15 +5971,13 @@ again:
5978 5971
5979#ifdef CONFIG_SND_HDA_POWER_SAVE 5972#ifdef CONFIG_SND_HDA_POWER_SAVE
5980 if (spec->gpio_led) { 5973 if (spec->gpio_led) {
5981 if (spec->gpio_led <= 8) { 5974 if (!spec->vref_mute_led_nid) {
5982 spec->gpio_mask |= spec->gpio_led; 5975 spec->gpio_mask |= spec->gpio_led;
5983 spec->gpio_dir |= spec->gpio_led; 5976 spec->gpio_dir |= spec->gpio_led;
5984 spec->gpio_data |= spec->gpio_led; 5977 spec->gpio_data |= spec->gpio_led;
5985 } else { 5978 } else {
5986 codec->patch_ops.set_power_state = 5979 codec->patch_ops.set_power_state =
5987 stac92xx_set_power_state; 5980 stac92xx_set_power_state;
5988 codec->patch_ops.post_suspend =
5989 stac92xx_post_suspend;
5990 } 5981 }
5991 codec->patch_ops.pre_resume = stac92xx_pre_resume; 5982 codec->patch_ops.pre_resume = stac92xx_pre_resume;
5992 codec->patch_ops.check_power_status = 5983 codec->patch_ops.check_power_status =
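
In patch_sigmatel.c the overloaded convention "gpio_led above 8 is really a pin NID" gives way to an explicit vref_mute_led_nid field, and the GPIO-versus-VREF decision is now made against the codec's actual GPIO count (read from AC_PAR_GPIO_CAP) instead of a hard-coded limit of 4. Roughly, as a sketch with simplified types:

    struct led_cfg {
        unsigned int gpio_led;             /* GPIO bitmask when the LED is GPIO-driven */
        unsigned int vref_mute_led_nid;    /* pin NID when the LED hangs off a VREF pin */
    };

    /* gpio_count would come from the codec's GPIO capability parameter in the
     * real driver; here it is simply passed in */
    static void pick_mute_led(struct led_cfg *cfg, unsigned int bios_value,
                              unsigned int gpio_count)
    {
        if (bios_value < gpio_count)
            cfg->gpio_led = 1u << bios_value;      /* a real GPIO line */
        else
            cfg->vref_mute_led_nid = bios_value;   /* actually a pin NID */
    }

The same field test then replaces every "gpio_led <= 8" comparison in the suspend/resume and LED-update paths.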
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 431c0d417eeb..b5137629f8e9 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -208,6 +208,7 @@ struct via_spec {
208 /* work to check hp jack state */ 208 /* work to check hp jack state */
209 struct hda_codec *codec; 209 struct hda_codec *codec;
210 struct delayed_work vt1708_hp_work; 210 struct delayed_work vt1708_hp_work;
211 int hp_work_active;
211 int vt1708_jack_detect; 212 int vt1708_jack_detect;
212 int vt1708_hp_present; 213 int vt1708_hp_present;
213 214
@@ -305,27 +306,35 @@ enum {
305static void analog_low_current_mode(struct hda_codec *codec); 306static void analog_low_current_mode(struct hda_codec *codec);
306static bool is_aa_path_mute(struct hda_codec *codec); 307static bool is_aa_path_mute(struct hda_codec *codec);
307 308
308static void vt1708_start_hp_work(struct via_spec *spec) 309#define hp_detect_with_aa(codec) \
310 (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1 && \
311 !is_aa_path_mute(codec))
312
313static void vt1708_stop_hp_work(struct via_spec *spec)
309{ 314{
310 if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0) 315 if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
311 return; 316 return;
312 snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 317 if (spec->hp_work_active) {
313 !spec->vt1708_jack_detect); 318 snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 1);
314 if (!delayed_work_pending(&spec->vt1708_hp_work)) 319 cancel_delayed_work_sync(&spec->vt1708_hp_work);
315 schedule_delayed_work(&spec->vt1708_hp_work, 320 spec->hp_work_active = 0;
316 msecs_to_jiffies(100)); 321 }
317} 322}
318 323
319static void vt1708_stop_hp_work(struct via_spec *spec) 324static void vt1708_update_hp_work(struct via_spec *spec)
320{ 325{
321 if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0) 326 if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
322 return; 327 return;
323 if (snd_hda_get_bool_hint(spec->codec, "analog_loopback_hp_detect") == 1 328 if (spec->vt1708_jack_detect &&
324 && !is_aa_path_mute(spec->codec)) 329 (spec->active_streams || hp_detect_with_aa(spec->codec))) {
325 return; 330 if (!spec->hp_work_active) {
326 snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 331 snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 0);
327 !spec->vt1708_jack_detect); 332 schedule_delayed_work(&spec->vt1708_hp_work,
328 cancel_delayed_work_sync(&spec->vt1708_hp_work); 333 msecs_to_jiffies(100));
334 spec->hp_work_active = 1;
335 }
336 } else if (!hp_detect_with_aa(spec->codec))
337 vt1708_stop_hp_work(spec);
329} 338}
330 339
331static void set_widgets_power_state(struct hda_codec *codec) 340static void set_widgets_power_state(struct hda_codec *codec)
@@ -343,12 +352,7 @@ static int analog_input_switch_put(struct snd_kcontrol *kcontrol,
343 352
344 set_widgets_power_state(codec); 353 set_widgets_power_state(codec);
345 analog_low_current_mode(snd_kcontrol_chip(kcontrol)); 354 analog_low_current_mode(snd_kcontrol_chip(kcontrol));
346 if (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1) { 355 vt1708_update_hp_work(codec->spec);
347 if (is_aa_path_mute(codec))
348 vt1708_start_hp_work(codec->spec);
349 else
350 vt1708_stop_hp_work(codec->spec);
351 }
352 return change; 356 return change;
353} 357}
354 358
@@ -1154,7 +1158,7 @@ static int via_playback_multi_pcm_prepare(struct hda_pcm_stream *hinfo,
1154 spec->cur_dac_stream_tag = stream_tag; 1158 spec->cur_dac_stream_tag = stream_tag;
1155 spec->cur_dac_format = format; 1159 spec->cur_dac_format = format;
1156 mutex_unlock(&spec->config_mutex); 1160 mutex_unlock(&spec->config_mutex);
1157 vt1708_start_hp_work(spec); 1161 vt1708_update_hp_work(spec);
1158 return 0; 1162 return 0;
1159} 1163}
1160 1164
@@ -1174,7 +1178,7 @@ static int via_playback_hp_pcm_prepare(struct hda_pcm_stream *hinfo,
1174 spec->cur_hp_stream_tag = stream_tag; 1178 spec->cur_hp_stream_tag = stream_tag;
1175 spec->cur_hp_format = format; 1179 spec->cur_hp_format = format;
1176 mutex_unlock(&spec->config_mutex); 1180 mutex_unlock(&spec->config_mutex);
1177 vt1708_start_hp_work(spec); 1181 vt1708_update_hp_work(spec);
1178 return 0; 1182 return 0;
1179} 1183}
1180 1184
@@ -1188,7 +1192,7 @@ static int via_playback_multi_pcm_cleanup(struct hda_pcm_stream *hinfo,
1188 snd_hda_multi_out_analog_cleanup(codec, &spec->multiout); 1192 snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
1189 spec->active_streams &= ~STREAM_MULTI_OUT; 1193 spec->active_streams &= ~STREAM_MULTI_OUT;
1190 mutex_unlock(&spec->config_mutex); 1194 mutex_unlock(&spec->config_mutex);
1191 vt1708_stop_hp_work(spec); 1195 vt1708_update_hp_work(spec);
1192 return 0; 1196 return 0;
1193} 1197}
1194 1198
@@ -1203,7 +1207,7 @@ static int via_playback_hp_pcm_cleanup(struct hda_pcm_stream *hinfo,
1203 snd_hda_codec_setup_stream(codec, spec->hp_dac_nid, 0, 0, 0); 1207 snd_hda_codec_setup_stream(codec, spec->hp_dac_nid, 0, 0, 0);
1204 spec->active_streams &= ~STREAM_INDEP_HP; 1208 spec->active_streams &= ~STREAM_INDEP_HP;
1205 mutex_unlock(&spec->config_mutex); 1209 mutex_unlock(&spec->config_mutex);
1206 vt1708_stop_hp_work(spec); 1210 vt1708_update_hp_work(spec);
1207 return 0; 1211 return 0;
1208} 1212}
1209 1213
@@ -1645,7 +1649,8 @@ static void via_hp_automute(struct hda_codec *codec)
1645 int nums; 1649 int nums;
1646 struct via_spec *spec = codec->spec; 1650 struct via_spec *spec = codec->spec;
1647 1651
1648 if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0]) 1652 if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0] &&
1653 (spec->codec_type != VT1708 || spec->vt1708_jack_detect))
1649 present = snd_hda_jack_detect(codec, spec->autocfg.hp_pins[0]); 1654 present = snd_hda_jack_detect(codec, spec->autocfg.hp_pins[0]);
1650 1655
1651 if (spec->smart51_enabled) 1656 if (spec->smart51_enabled)
@@ -2612,8 +2617,6 @@ static int vt1708_jack_detect_get(struct snd_kcontrol *kcontrol,
2612 2617
2613 if (spec->codec_type != VT1708) 2618 if (spec->codec_type != VT1708)
2614 return 0; 2619 return 0;
2615 spec->vt1708_jack_detect =
2616 !((snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8) & 0x1);
2617 ucontrol->value.integer.value[0] = spec->vt1708_jack_detect; 2620 ucontrol->value.integer.value[0] = spec->vt1708_jack_detect;
2618 return 0; 2621 return 0;
2619} 2622}
@@ -2623,18 +2626,22 @@ static int vt1708_jack_detect_put(struct snd_kcontrol *kcontrol,
2623{ 2626{
2624 struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 2627 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
2625 struct via_spec *spec = codec->spec; 2628 struct via_spec *spec = codec->spec;
2626 int change; 2629 int val;
2627 2630
2628 if (spec->codec_type != VT1708) 2631 if (spec->codec_type != VT1708)
2629 return 0; 2632 return 0;
2630 spec->vt1708_jack_detect = ucontrol->value.integer.value[0]; 2633 val = !!ucontrol->value.integer.value[0];
2631 change = (0x1 & (snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8)) 2634 if (spec->vt1708_jack_detect == val)
2632 == !spec->vt1708_jack_detect; 2635 return 0;
2633 if (spec->vt1708_jack_detect) { 2636 spec->vt1708_jack_detect = val;
2637 if (spec->vt1708_jack_detect &&
2638 snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") != 1) {
2634 mute_aa_path(codec, 1); 2639 mute_aa_path(codec, 1);
2635 notify_aa_path_ctls(codec); 2640 notify_aa_path_ctls(codec);
2636 } 2641 }
2637 return change; 2642 via_hp_automute(codec);
2643 vt1708_update_hp_work(spec);
2644 return 1;
2638} 2645}
2639 2646
2640static const struct snd_kcontrol_new vt1708_jack_detect_ctl = { 2647static const struct snd_kcontrol_new vt1708_jack_detect_ctl = {
@@ -2771,6 +2778,7 @@ static int via_init(struct hda_codec *codec)
2771 via_auto_init_unsol_event(codec); 2778 via_auto_init_unsol_event(codec);
2772 2779
2773 via_hp_automute(codec); 2780 via_hp_automute(codec);
2781 vt1708_update_hp_work(spec);
2774 2782
2775 return 0; 2783 return 0;
2776} 2784}
@@ -2787,7 +2795,9 @@ static void vt1708_update_hp_jack_state(struct work_struct *work)
2787 spec->vt1708_hp_present ^= 1; 2795 spec->vt1708_hp_present ^= 1;
2788 via_hp_automute(spec->codec); 2796 via_hp_automute(spec->codec);
2789 } 2797 }
2790 vt1708_start_hp_work(spec); 2798 if (spec->vt1708_jack_detect)
2799 schedule_delayed_work(&spec->vt1708_hp_work,
2800 msecs_to_jiffies(100));
2791} 2801}
2792 2802
2793static int get_mux_nids(struct hda_codec *codec) 2803static int get_mux_nids(struct hda_codec *codec)
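
patch_via.c folds the separate start/stop helpers for the VT1708 headphone-poll work into one vt1708_update_hp_work() that remembers whether the work is already running (hp_work_active) and only schedules or cancels on an actual state change. The same idempotent-update shape, with the driver's start condition reduced to a stub:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    struct hp_poller {
        struct delayed_work work;
        bool active;
    };

    /* stub: the real condition checks jack-detect mode, active streams and
     * the analog-loopback hint */
    static bool should_poll(const struct hp_poller *p)
    {
        return false;
    }

    static void hp_poller_update(struct hp_poller *p)
    {
        if (should_poll(p)) {
            if (!p->active) {           /* start only if not already running */
                schedule_delayed_work(&p->work, msecs_to_jiffies(100));
                p->active = true;
            }
        } else if (p->active) {         /* stop only if actually running */
            cancel_delayed_work_sync(&p->work);
            p->active = false;
        }
    }

Tracking the state explicitly avoids the old pattern of rewriting the 0xf81 verb and re-checking delayed_work_pending() on every PCM prepare and cleanup.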
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 29e312597f20..11718b49b2e2 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -1077,6 +1077,13 @@ static snd_pcm_uframes_t snd_intel8x0_pcm_pointer(struct snd_pcm_substream *subs
1077 } 1077 }
1078 if (civ != igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV)) 1078 if (civ != igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV))
1079 continue; 1079 continue;
1080
1081 /* IO read operation is very expensive inside virtual machine
1082 * as it is emulated. The probability that subsequent PICB read
1083 * will return different result is high enough to loop till
1084 * timeout here.
1085 * Same CIV is strict enough condition to be sure that PICB
1086 * is valid inside VM on emulated card. */
1080 if (chip->inside_vm) 1087 if (chip->inside_vm)
1081 break; 1088 break;
1082 if (ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb)) 1089 if (ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb))
@@ -2930,6 +2937,45 @@ static unsigned int sis_codec_bits[3] = {
2930 ICH_PCR, ICH_SCR, ICH_SIS_TCR 2937 ICH_PCR, ICH_SCR, ICH_SIS_TCR
2931}; 2938};
2932 2939
2940static int __devinit snd_intel8x0_inside_vm(struct pci_dev *pci)
2941{
2942 int result = inside_vm;
2943 char *msg = NULL;
2944
2945 /* check module parameter first (override detection) */
2946 if (result >= 0) {
2947 msg = result ? "enable (forced) VM" : "disable (forced) VM";
2948 goto fini;
2949 }
2950
2951 /* detect KVM and Parallels virtual environments */
2952 result = kvm_para_available();
2953#ifdef X86_FEATURE_HYPERVISOR
2954 result = result || boot_cpu_has(X86_FEATURE_HYPERVISOR);
2955#endif
2956 if (!result)
2957 goto fini;
2958
2959 /* check for known (emulated) devices */
2960 if (pci->subsystem_vendor == 0x1af4 &&
2961 pci->subsystem_device == 0x1100) {
2962 /* KVM emulated sound, PCI SSID: 1af4:1100 */
2963 msg = "enable KVM";
2964 } else if (pci->subsystem_vendor == 0x1ab8) {
2965 /* Parallels VM emulated sound, PCI SSID: 1ab8:xxxx */
2966 msg = "enable Parallels VM";
2967 } else {
2968 msg = "disable (unknown or VT-d) VM";
2969 result = 0;
2970 }
2971
2972fini:
2973 if (msg != NULL)
2974 printk(KERN_INFO "intel8x0: %s optimization\n", msg);
2975
2976 return result;
2977}
2978
2933static int __devinit snd_intel8x0_create(struct snd_card *card, 2979static int __devinit snd_intel8x0_create(struct snd_card *card,
2934 struct pci_dev *pci, 2980 struct pci_dev *pci,
2935 unsigned long device_type, 2981 unsigned long device_type,
@@ -2997,9 +3043,7 @@ static int __devinit snd_intel8x0_create(struct snd_card *card,
2997 if (xbox) 3043 if (xbox)
2998 chip->xbox = 1; 3044 chip->xbox = 1;
2999 3045
3000 chip->inside_vm = inside_vm; 3046 chip->inside_vm = snd_intel8x0_inside_vm(pci);
3001 if (inside_vm)
3002 printk(KERN_INFO "intel8x0: enable KVM optimization\n");
3003 3047
3004 if (pci->vendor == PCI_VENDOR_ID_INTEL && 3048 if (pci->vendor == PCI_VENDOR_ID_INTEL &&
3005 pci->device == PCI_DEVICE_ID_INTEL_440MX) 3049 pci->device == PCI_DEVICE_ID_INTEL_440MX)
@@ -3243,14 +3287,6 @@ static int __devinit snd_intel8x0_probe(struct pci_dev *pci,
3243 buggy_irq = 0; 3287 buggy_irq = 0;
3244 } 3288 }
3245 3289
3246 if (inside_vm < 0) {
3247 /* detect KVM and Parallels virtual environments */
3248 inside_vm = kvm_para_available();
3249#if defined(__i386__) || defined(__x86_64__)
3250 inside_vm = inside_vm || boot_cpu_has(X86_FEATURE_HYPERVISOR);
3251#endif
3252 }
3253
3254 if ((err = snd_intel8x0_create(card, pci, pci_id->driver_data, 3290 if ((err = snd_intel8x0_create(card, pci, pci_id->driver_data,
3255 &chip)) < 0) { 3291 &chip)) < 0) {
3256 snd_card_free(card); 3292 snd_card_free(card);
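
The intel8x0 VM handling is consolidated into snd_intel8x0_inside_vm(): an explicit module-parameter override wins, then a generic hypervisor probe (kvm_para_available(), X86_FEATURE_HYPERVISOR where available), and finally the PCI subsystem IDs decide whether the card is a known emulated device; the PICB re-read loop gains a comment explaining why it can be skipped inside a VM. The decision logic, stripped of the kernel specifics and with the inputs passed as plain arguments:

    /* three-stage decision: explicit override, then "are we virtualized?",
     * then a short whitelist of emulated subsystem vendors */
    static int detect_vm_optimization(int forced, int hypervisor_detected,
                                      unsigned short subsys_vendor)
    {
        if (forced >= 0)                /* module parameter forces the answer */
            return forced;
        if (!hypervisor_detected)       /* bare metal: nothing to optimize */
            return 0;
        switch (subsys_vendor) {
        case 0x1af4:                    /* KVM (virtio) emulated card */
        case 0x1ab8:                    /* Parallels emulated card */
            return 1;
        default:                        /* unknown, or real hardware via VT-d */
            return 0;
        }
    }

The real helper also matches the KVM device on its full SSID (1af4:1100) and logs which case it took; this sketch keeps only the control flow.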
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index 5c8717e29eeb..8c3e7fcefd99 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -78,10 +78,15 @@ unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
78 return ioread32(address); 78 return ioread32(address);
79} 79}
80 80
81void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len) 81static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
82 u32 len)
82{ 83{
83 void __iomem *address = lx_dsp_register(chip, port); 84 u32 __iomem *address = lx_dsp_register(chip, port);
84 memcpy_fromio(data, address, len*sizeof(u32)); 85 int i;
86
87 /* we cannot use memcpy_fromio */
88 for (i = 0; i != len; ++i)
89 data[i] = ioread32(address + i);
85} 90}
86 91
87 92
@@ -91,11 +96,15 @@ void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
91 iowrite32(data, address); 96 iowrite32(data, address);
92} 97}
93 98
94void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data, 99static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
95 u32 len) 100 const u32 *data, u32 len)
96{ 101{
97 void __iomem *address = lx_dsp_register(chip, port); 102 u32 __iomem *address = lx_dsp_register(chip, port);
98 memcpy_toio(address, data, len*sizeof(u32)); 103 int i;
104
105 /* we cannot use memcpy_toio */
106 for (i = 0; i != len; ++i)
107 iowrite32(data[i], address + i);
99} 108}
100 109
101 110
diff --git a/sound/pci/lx6464es/lx_core.h b/sound/pci/lx6464es/lx_core.h
index 1dd562980b6c..4d7ff797a646 100644
--- a/sound/pci/lx6464es/lx_core.h
+++ b/sound/pci/lx6464es/lx_core.h
@@ -72,10 +72,7 @@ enum {
72}; 72};
73 73
74unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port); 74unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port);
75void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len);
76void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data); 75void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data);
77void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
78 u32 len);
79 76
80/* plx register access */ 77/* plx register access */
81enum { 78enum {
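
The lx_core.c hunks above swap memcpy_fromio()/memcpy_toio() for explicit per-word loops, and the header hunk drops the now-static prototypes. The patch comment ("we cannot use memcpy_fromio") points at access width: the DSP window presumably has to be read and written in 32-bit units, which the generic copy helpers do not guarantee. A minimal sketch of the pattern, with the chip/port lookup reduced to a bare __iomem pointer:

static void reg_readbuf(u32 __iomem *address, u32 *data, u32 len)
{
	u32 i;

	/* one ioread32() per word keeps every MMIO access 32 bits wide */
	for (i = 0; i != len; ++i)
		data[i] = ioread32(address + i);
}

static void reg_writebuf(u32 __iomem *address, const u32 *data, u32 len)
{
	u32 i;

	for (i = 0; i != len; ++i)
		iowrite32(data[i], address + i);
}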
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index e760adad9523..19ee2203cbb5 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6518,7 +6518,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
6518 hdspm->io_type = AES32; 6518 hdspm->io_type = AES32;
6519 hdspm->card_name = "RME AES32"; 6519 hdspm->card_name = "RME AES32";
6520 hdspm->midiPorts = 2; 6520 hdspm->midiPorts = 2;
6521 } else if ((hdspm->firmware_rev == 0xd5) || 6521 } else if ((hdspm->firmware_rev == 0xd2) ||
6522 ((hdspm->firmware_rev >= 0xc8) && 6522 ((hdspm->firmware_rev >= 0xc8) &&
6523 (hdspm->firmware_rev <= 0xcf))) { 6523 (hdspm->firmware_rev <= 0xcf))) {
6524 hdspm->io_type = MADI; 6524 hdspm->io_type = MADI;
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index a391e622a192..28dfafb56dd1 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -41,6 +41,7 @@ MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}");
41static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */ 41static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
42static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ 42static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
43static int enable = 1; 43static int enable = 1;
44static int codecs = 1;
44 45
45module_param(index, int, 0444); 46module_param(index, int, 0444);
46MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator."); 47MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator.");
@@ -48,6 +49,8 @@ module_param(id, charp, 0444);
48MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator."); 49MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator.");
49module_param(enable, bool, 0444); 50module_param(enable, bool, 0444);
50MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator."); 51MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator.");
52module_param(codecs, int, 0444);
53MODULE_PARM_DESC(codecs, "Set bit to indicate that codec number is expected to be present (default 1)");
51 54
52static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = { 55static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = {
53 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) }, 56 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) },
@@ -140,6 +143,9 @@ struct sis7019 {
140 dma_addr_t silence_dma_addr; 143 dma_addr_t silence_dma_addr;
141}; 144};
142 145
146/* These values are also used by the module param 'codecs' to indicate
147 * which codecs should be present.
148 */
143#define SIS_PRIMARY_CODEC_PRESENT 0x0001 149#define SIS_PRIMARY_CODEC_PRESENT 0x0001
144#define SIS_SECONDARY_CODEC_PRESENT 0x0002 150#define SIS_SECONDARY_CODEC_PRESENT 0x0002
145#define SIS_TERTIARY_CODEC_PRESENT 0x0004 151#define SIS_TERTIARY_CODEC_PRESENT 0x0004
@@ -1078,6 +1084,7 @@ static int sis_chip_init(struct sis7019 *sis)
1078{ 1084{
1079 unsigned long io = sis->ioport; 1085 unsigned long io = sis->ioport;
1080 void __iomem *ioaddr = sis->ioaddr; 1086 void __iomem *ioaddr = sis->ioaddr;
1087 unsigned long timeout;
1081 u16 status; 1088 u16 status;
1082 int count; 1089 int count;
1083 int i; 1090 int i;
@@ -1104,21 +1111,45 @@ static int sis_chip_init(struct sis7019 *sis)
1104 while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count) 1111 while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count)
1105 udelay(1); 1112 udelay(1);
1106 1113
1114 /* Command complete, we can let go of the semaphore now.
1115 */
1116 outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
1117 if (!count)
1118 return -EIO;
1119
1107 /* Now that we've finished the reset, find out what's attached. 1120 /* Now that we've finished the reset, find out what's attached.
1121 * There are some codec/board combinations that take an extremely
1122 * long time to come up. 350+ ms has been observed in the field,
1123 * so we'll give them up to 500ms.
1108 */ 1124 */
1109 status = inl(io + SIS_AC97_STATUS); 1125 sis->codecs_present = 0;
1110 if (status & SIS_AC97_STATUS_CODEC_READY) 1126 timeout = msecs_to_jiffies(500) + jiffies;
1111 sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT; 1127 while (time_before_eq(jiffies, timeout)) {
1112 if (status & SIS_AC97_STATUS_CODEC2_READY) 1128 status = inl(io + SIS_AC97_STATUS);
1113 sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT; 1129 if (status & SIS_AC97_STATUS_CODEC_READY)
1114 if (status & SIS_AC97_STATUS_CODEC3_READY) 1130 sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
1115 sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT; 1131 if (status & SIS_AC97_STATUS_CODEC2_READY)
1116 1132 sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
1117 /* All done, let go of the semaphore, and check for errors 1133 if (status & SIS_AC97_STATUS_CODEC3_READY)
1134 sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
1135
1136 if (sis->codecs_present == codecs)
1137 break;
1138
1139 msleep(1);
1140 }
1141
1142 /* All done, check for errors.
1118 */ 1143 */
1119 outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA); 1144 if (!sis->codecs_present) {
1120 if (!sis->codecs_present || !count) 1145 printk(KERN_ERR "sis7019: could not find any codecs\n");
1121 return -EIO; 1146 return -EIO;
1147 }
1148
1149 if (sis->codecs_present != codecs) {
1150 printk(KERN_WARNING "sis7019: missing codecs, found %0x, expected %0x\n",
1151 sis->codecs_present, codecs);
1152 }
1122 1153
1123 /* Let the hardware know that the audio driver is alive, 1154 /* Let the hardware know that the audio driver is alive,
1124 * and enable PCM slots on the AC-link for L/R playback (3 & 4) and 1155 * and enable PCM slots on the AC-link for L/R playback (3 & 4) and
@@ -1390,6 +1421,17 @@ static int __devinit snd_sis7019_probe(struct pci_dev *pci,
1390 if (!enable) 1421 if (!enable)
1391 goto error_out; 1422 goto error_out;
1392 1423
1424 /* The user can specify which codecs should be present so that we
1425 * can wait for them to show up if they are slow to recover from
1426 * the AC97 cold reset. We default to a single codec, the primary.
1427 *
1428 * We assume that SIS_PRIMARY_*_PRESENT matches bits 0-2.
1429 */
1430 codecs &= SIS_PRIMARY_CODEC_PRESENT | SIS_SECONDARY_CODEC_PRESENT |
1431 SIS_TERTIARY_CODEC_PRESENT;
1432 if (!codecs)
1433 codecs = SIS_PRIMARY_CODEC_PRESENT;
1434
1393 rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card); 1435 rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card);
1394 if (rc < 0) 1436 if (rc < 0)
1395 goto error_out; 1437 goto error_out;
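
The sis7019 changes above replace a single status-register snapshot with a polling loop: after the AC97 cold reset the driver keeps re-reading the status register for up to 500 ms, stopping early once every codec named by the new 'codecs' module parameter has reported ready. A condensed sketch of that loop follows; the helper name is invented, the SIS_* constants are the driver's own, and the semaphore handling from the hunk is elided.

static int sis_wait_codecs_ready(unsigned long io, unsigned int codecs)
{
	unsigned long timeout = msecs_to_jiffies(500) + jiffies;
	unsigned int present = 0;
	u16 status;

	while (time_before_eq(jiffies, timeout)) {
		status = inl(io + SIS_AC97_STATUS);
		if (status & SIS_AC97_STATUS_CODEC_READY)
			present |= SIS_PRIMARY_CODEC_PRESENT;
		if (status & SIS_AC97_STATUS_CODEC2_READY)
			present |= SIS_SECONDARY_CODEC_PRESENT;
		if (status & SIS_AC97_STATUS_CODEC3_READY)
			present |= SIS_TERTIARY_CODEC_PRESENT;

		if (present == codecs)		/* every expected codec is up */
			break;

		msleep(1);			/* 350+ ms observed in the field */
	}

	if (!present) {
		printk(KERN_ERR "sis7019: could not find any codecs\n");
		return -EIO;
	}
	if (present != codecs)
		printk(KERN_WARNING "sis7019: missing codecs, found %0x, expected %0x\n",
		       present, codecs);

	return present;			/* caller stores this as codecs_present */
}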
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index a3ce1b22620d..1aa52eff526a 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -876,7 +876,7 @@ static void __devinit snd_ps3_audio_set_base_addr(uint64_t ioaddr_start)
876 (0x0fUL << 12) | 876 (0x0fUL << 12) |
877 (PS3_AUDIO_IOID); 877 (PS3_AUDIO_IOID);
878 878
879 ret = lv1_gpu_attribute(0x100, 0x007, val, 0, 0); 879 ret = lv1_gpu_attribute(0x100, 0x007, val);
880 if (ret) 880 if (ret)
881 pr_info("%s: gpu_attribute failed %d\n", __func__, 881 pr_info("%s: gpu_attribute failed %d\n", __func__,
882 ret); 882 ret);
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index bee3c94f58b0..d1fcc816ce97 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -1,6 +1,6 @@
1config SND_ATMEL_SOC 1config SND_ATMEL_SOC
2 tristate "SoC Audio for the Atmel System-on-Chip" 2 tristate "SoC Audio for the Atmel System-on-Chip"
3 depends on ARCH_AT91 || AVR32 3 depends on ARCH_AT91
4 help 4 help
5 Say Y or M if you want to add support for codecs attached to 5 Say Y or M if you want to add support for codecs attached to
6 the ATMEL SSC interface. You will also need 6 the ATMEL SSC interface. You will also need
@@ -24,25 +24,6 @@ config SND_AT91_SOC_SAM9G20_WM8731
24 Say Y if you want to add support for SoC audio on WM8731-based 24 Say Y if you want to add support for SoC audio on WM8731-based
25 AT91sam9g20 evaluation board. 25 AT91sam9g20 evaluation board.
26 26
27config SND_AT32_SOC_PLAYPAQ
28 tristate "SoC Audio support for PlayPaq with WM8510"
29 depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS
30 select SND_ATMEL_SOC_SSC
31 select SND_SOC_WM8510
32 help
33 Say Y or M here if you want to add support for SoC audio
34 on the LRS PlayPaq.
35
36config SND_AT32_SOC_PLAYPAQ_SLAVE
37 bool "Run CODEC on PlayPaq in slave mode"
38 depends on SND_AT32_SOC_PLAYPAQ
39 default n
40 help
41 Say Y if you want to run with the AT32 SSC generating the BCLK
42 and FRAME signals on the PlayPaq. Unless you want to play
43 with the AT32 as the SSC master, you probably want to say N here,
44 as this will give you better sound quality.
45
46config SND_AT91_SOC_AFEB9260 27config SND_AT91_SOC_AFEB9260
47 tristate "SoC Audio support for AFEB9260 board" 28 tristate "SoC Audio support for AFEB9260 board"
48 depends on ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC 29 depends on ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index e7ea56bd5f82..a5c0bf19da78 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -8,9 +8,5 @@ obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
8# AT91 Machine Support 8# AT91 Machine Support
9snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o 9snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o
10 10
11# AT32 Machine Support
12snd-soc-playpaq-objs := playpaq_wm8510.o
13
14obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o 11obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
15obj-$(CONFIG_SND_AT32_SOC_PLAYPAQ) += snd-soc-playpaq.o
16obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o 12obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o
diff --git a/sound/soc/atmel/playpaq_wm8510.c b/sound/soc/atmel/playpaq_wm8510.c
deleted file mode 100644
index 73ae99ad4578..000000000000
--- a/sound/soc/atmel/playpaq_wm8510.c
+++ /dev/null
@@ -1,473 +0,0 @@
1/* sound/soc/at32/playpaq_wm8510.c
2 * ASoC machine driver for PlayPaq using WM8510 codec
3 *
4 * Copyright (C) 2008 Long Range Systems
5 * Geoffrey Wossum <gwossum@acm.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This code is largely inspired by sound/soc/at91/eti_b1_wm8731.c
12 *
13 * NOTE: If you don't have the AT32 enhanced portmux configured (which
14 * isn't currently in the mainline or Atmel patched kernel), you will
15 * need to set the MCLK pin (PA30) to peripheral A in your board initialization
16 * code. Something like:
17 * at32_select_periph(GPIO_PIN_PA(30), GPIO_PERIPH_A, 0);
18 *
19 */
20
21/* #define DEBUG */
22
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/kernel.h>
26#include <linux/errno.h>
27#include <linux/clk.h>
28#include <linux/timer.h>
29#include <linux/interrupt.h>
30#include <linux/platform_device.h>
31
32#include <sound/core.h>
33#include <sound/pcm.h>
34#include <sound/pcm_params.h>
35#include <sound/soc.h>
36
37#include <mach/at32ap700x.h>
38#include <mach/portmux.h>
39
40#include "../codecs/wm8510.h"
41#include "atmel-pcm.h"
42#include "atmel_ssc_dai.h"
43
44
45/*-------------------------------------------------------------------------*\
46 * constants
47\*-------------------------------------------------------------------------*/
48#define MCLK_PIN GPIO_PIN_PA(30)
49#define MCLK_PERIPH GPIO_PERIPH_A
50
51
52/*-------------------------------------------------------------------------*\
53 * data types
54\*-------------------------------------------------------------------------*/
55/* SSC clocking data */
56struct ssc_clock_data {
57 /* CMR div */
58 unsigned int cmr_div;
59
60 /* Frame period (as needed by xCMR.PERIOD) */
61 unsigned int period;
62
63 /* The SSC clock rate these settings where calculated for */
64 unsigned long ssc_rate;
65};
66
67
68/*-------------------------------------------------------------------------*\
69 * module data
70\*-------------------------------------------------------------------------*/
71static struct clk *_gclk0;
72static struct clk *_pll0;
73
74#define CODEC_CLK (_gclk0)
75
76
77/*-------------------------------------------------------------------------*\
78 * Sound SOC operations
79\*-------------------------------------------------------------------------*/
80#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
81static struct ssc_clock_data playpaq_wm8510_calc_ssc_clock(
82 struct snd_pcm_hw_params *params,
83 struct snd_soc_dai *cpu_dai)
84{
85 struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
86 struct ssc_device *ssc = ssc_p->ssc;
87 struct ssc_clock_data cd;
88 unsigned int rate, width_bits, channels;
89 unsigned int bitrate, ssc_div;
90 unsigned actual_rate;
91
92
93 /*
94 * Figure out required bitrate
95 */
96 rate = params_rate(params);
97 channels = params_channels(params);
98 width_bits = snd_pcm_format_physical_width(params_format(params));
99 bitrate = rate * width_bits * channels;
100
101
102 /*
103 * Figure out required SSC divider and period for required bitrate
104 */
105 cd.ssc_rate = clk_get_rate(ssc->clk);
106 ssc_div = cd.ssc_rate / bitrate;
107 cd.cmr_div = ssc_div / 2;
108 if (ssc_div & 1) {
109 /* round cmr_div up */
110 cd.cmr_div++;
111 }
112 cd.period = width_bits - 1;
113
114
115 /*
116 * Find actual rate, compare to requested rate
117 */
118 actual_rate = (cd.ssc_rate / (cd.cmr_div * 2)) / (2 * (cd.period + 1));
119 pr_debug("playpaq_wm8510: Request rate = %u, actual rate = %u\n",
120 rate, actual_rate);
121
122
123 return cd;
124}
125#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
126
127
128
129static int playpaq_wm8510_hw_params(struct snd_pcm_substream *substream,
130 struct snd_pcm_hw_params *params)
131{
132 struct snd_soc_pcm_runtime *rtd = substream->private_data;
133 struct snd_soc_dai *codec_dai = rtd->codec_dai;
134 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
135 struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
136 struct ssc_device *ssc = ssc_p->ssc;
137 unsigned int pll_out = 0, bclk = 0, mclk_div = 0;
138 int ret;
139
140
141 /* Due to difficulties with getting the correct clocks from the AT32's
142 * PLL0, we're going to let the CODEC be in charge of all the clocks
143 */
144#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
145 const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
146 SND_SOC_DAIFMT_NB_NF |
147 SND_SOC_DAIFMT_CBM_CFM);
148#else
149 struct ssc_clock_data cd;
150 const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
151 SND_SOC_DAIFMT_NB_NF |
152 SND_SOC_DAIFMT_CBS_CFS);
153#endif
154
155 if (ssc == NULL) {
156 pr_warning("playpaq_wm8510_hw_params: ssc is NULL!\n");
157 return -EINVAL;
158 }
159
160
161 /*
162 * Figure out PLL and BCLK dividers for WM8510
163 */
164 switch (params_rate(params)) {
165 case 48000:
166 pll_out = 24576000;
167 mclk_div = WM8510_MCLKDIV_2;
168 bclk = WM8510_BCLKDIV_8;
169 break;
170
171 case 44100:
172 pll_out = 22579200;
173 mclk_div = WM8510_MCLKDIV_2;
174 bclk = WM8510_BCLKDIV_8;
175 break;
176
177 case 22050:
178 pll_out = 22579200;
179 mclk_div = WM8510_MCLKDIV_4;
180 bclk = WM8510_BCLKDIV_8;
181 break;
182
183 case 16000:
184 pll_out = 24576000;
185 mclk_div = WM8510_MCLKDIV_6;
186 bclk = WM8510_BCLKDIV_8;
187 break;
188
189 case 11025:
190 pll_out = 22579200;
191 mclk_div = WM8510_MCLKDIV_8;
192 bclk = WM8510_BCLKDIV_8;
193 break;
194
195 case 8000:
196 pll_out = 24576000;
197 mclk_div = WM8510_MCLKDIV_12;
198 bclk = WM8510_BCLKDIV_8;
199 break;
200
201 default:
202 pr_warning("playpaq_wm8510: Unsupported sample rate %d\n",
203 params_rate(params));
204 return -EINVAL;
205 }
206
207
208 /*
209 * set CPU and CODEC DAI configuration
210 */
211 ret = snd_soc_dai_set_fmt(codec_dai, fmt);
212 if (ret < 0) {
213 pr_warning("playpaq_wm8510: "
214 "Failed to set CODEC DAI format (%d)\n",
215 ret);
216 return ret;
217 }
218 ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
219 if (ret < 0) {
220 pr_warning("playpaq_wm8510: "
221 "Failed to set CPU DAI format (%d)\n",
222 ret);
223 return ret;
224 }
225
226
227 /*
228 * Set CPU clock configuration
229 */
230#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
231 cd = playpaq_wm8510_calc_ssc_clock(params, cpu_dai);
232 pr_debug("playpaq_wm8510: cmr_div = %d, period = %d\n",
233 cd.cmr_div, cd.period);
234 ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_CMR_DIV, cd.cmr_div);
235 if (ret < 0) {
236 pr_warning("playpaq_wm8510: Failed to set CPU CMR_DIV (%d)\n",
237 ret);
238 return ret;
239 }
240 ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_TCMR_PERIOD,
241 cd.period);
242 if (ret < 0) {
243 pr_warning("playpaq_wm8510: "
244 "Failed to set CPU transmit period (%d)\n",
245 ret);
246 return ret;
247 }
248#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
249
250
251 /*
252 * Set CODEC clock configuration
253 */
254 pr_debug("playpaq_wm8510: "
255 "pll_in = %ld, pll_out = %u, bclk = %x, mclk = %x\n",
256 clk_get_rate(CODEC_CLK), pll_out, bclk, mclk_div);
257
258
259#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
260 ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_BCLKDIV, bclk);
261 if (ret < 0) {
262 pr_warning
263 ("playpaq_wm8510: Failed to set CODEC DAI BCLKDIV (%d)\n",
264 ret);
265 return ret;
266 }
267#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
268
269
270 ret = snd_soc_dai_set_pll(codec_dai, 0, 0,
271 clk_get_rate(CODEC_CLK), pll_out);
272 if (ret < 0) {
273 pr_warning("playpaq_wm8510: Failed to set CODEC DAI PLL (%d)\n",
274 ret);
275 return ret;
276 }
277
278
279 ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_MCLKDIV, mclk_div);
280 if (ret < 0) {
281 pr_warning("playpaq_wm8510: Failed to set CODEC MCLKDIV (%d)\n",
282 ret);
283 return ret;
284 }
285
286
287 return 0;
288}
289
290
291
292static struct snd_soc_ops playpaq_wm8510_ops = {
293 .hw_params = playpaq_wm8510_hw_params,
294};
295
296
297
298static const struct snd_soc_dapm_widget playpaq_dapm_widgets[] = {
299 SND_SOC_DAPM_MIC("Int Mic", NULL),
300 SND_SOC_DAPM_SPK("Ext Spk", NULL),
301};
302
303
304
305static const struct snd_soc_dapm_route intercon[] = {
306 /* speaker connected to SPKOUT */
307 {"Ext Spk", NULL, "SPKOUTP"},
308 {"Ext Spk", NULL, "SPKOUTN"},
309
310 {"Mic Bias", NULL, "Int Mic"},
311 {"MICN", NULL, "Mic Bias"},
312 {"MICP", NULL, "Mic Bias"},
313};
314
315
316
317static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd)
318{
319 struct snd_soc_codec *codec = rtd->codec;
320 struct snd_soc_dapm_context *dapm = &codec->dapm;
321 int i;
322
323 /*
324 * Add DAPM widgets
325 */
326 for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++)
327 snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]);
328
329
330
331 /*
332 * Setup audio path interconnects
333 */
334 snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
335
336
337
338 /* always connected pins */
339 snd_soc_dapm_enable_pin(dapm, "Int Mic");
340 snd_soc_dapm_enable_pin(dapm, "Ext Spk");
341
342
343
344 /* Make CSB show PLL rate */
345 snd_soc_dai_set_clkdiv(rtd->codec_dai, WM8510_OPCLKDIV,
346 WM8510_OPCLKDIV_1 | 4);
347
348 return 0;
349}
350
351
352
353static struct snd_soc_dai_link playpaq_wm8510_dai = {
354 .name = "WM8510",
355 .stream_name = "WM8510 PCM",
356 .cpu_dai_name= "atmel-ssc-dai.0",
357 .platform_name = "atmel-pcm-audio",
358 .codec_name = "wm8510-codec.0-0x1a",
359 .codec_dai_name = "wm8510-hifi",
360 .init = playpaq_wm8510_init,
361 .ops = &playpaq_wm8510_ops,
362};
363
364
365
366static struct snd_soc_card snd_soc_playpaq = {
367 .name = "LRS_PlayPaq_WM8510",
368 .dai_link = &playpaq_wm8510_dai,
369 .num_links = 1,
370};
371
372static struct platform_device *playpaq_snd_device;
373
374
375static int __init playpaq_asoc_init(void)
376{
377 int ret = 0;
378
379 /*
380 * Configure MCLK for WM8510
381 */
382 _gclk0 = clk_get(NULL, "gclk0");
383 if (IS_ERR(_gclk0)) {
384 _gclk0 = NULL;
385 ret = PTR_ERR(_gclk0);
386 goto err_gclk0;
387 }
388 _pll0 = clk_get(NULL, "pll0");
389 if (IS_ERR(_pll0)) {
390 _pll0 = NULL;
391 ret = PTR_ERR(_pll0);
392 goto err_pll0;
393 }
394 ret = clk_set_parent(_gclk0, _pll0);
395 if (ret) {
396 pr_warning("snd-soc-playpaq: "
397 "Failed to set PLL0 as parent for DAC clock\n");
398 goto err_set_clk;
399 }
400 clk_set_rate(CODEC_CLK, 12000000);
401 clk_enable(CODEC_CLK);
402
403#if defined CONFIG_AT32_ENHANCED_PORTMUX
404 at32_select_periph(MCLK_PIN, MCLK_PERIPH, 0);
405#endif
406
407
408 /*
409 * Create and register platform device
410 */
411 playpaq_snd_device = platform_device_alloc("soc-audio", 0);
412 if (playpaq_snd_device == NULL) {
413 ret = -ENOMEM;
414 goto err_device_alloc;
415 }
416
417 platform_set_drvdata(playpaq_snd_device, &snd_soc_playpaq);
418
419 ret = platform_device_add(playpaq_snd_device);
420 if (ret) {
421 pr_warning("playpaq_wm8510: platform_device_add failed (%d)\n",
422 ret);
423 goto err_device_add;
424 }
425
426 return 0;
427
428
429err_device_add:
430 if (playpaq_snd_device != NULL) {
431 platform_device_put(playpaq_snd_device);
432 playpaq_snd_device = NULL;
433 }
434err_device_alloc:
435err_set_clk:
436 if (_pll0 != NULL) {
437 clk_put(_pll0);
438 _pll0 = NULL;
439 }
440err_pll0:
441 if (_gclk0 != NULL) {
442 clk_put(_gclk0);
443 _gclk0 = NULL;
444 }
445 return ret;
446}
447
448
449static void __exit playpaq_asoc_exit(void)
450{
451 if (_gclk0 != NULL) {
452 clk_put(_gclk0);
453 _gclk0 = NULL;
454 }
455 if (_pll0 != NULL) {
456 clk_put(_pll0);
457 _pll0 = NULL;
458 }
459
460#if defined CONFIG_AT32_ENHANCED_PORTMUX
461 at32_free_pin(MCLK_PIN);
462#endif
463
464 platform_device_unregister(playpaq_snd_device);
465 playpaq_snd_device = NULL;
466}
467
468module_init(playpaq_asoc_init);
469module_exit(playpaq_asoc_exit);
470
471MODULE_AUTHOR("Geoffrey Wossum <gwossum@acm.org>");
472MODULE_DESCRIPTION("ASoC machine driver for LRS PlayPaq");
473MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 4584514d93d4..fa787d45d74a 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -33,7 +33,7 @@ config SND_SOC_ALL_CODECS
33 select SND_SOC_CX20442 33 select SND_SOC_CX20442
34 select SND_SOC_DA7210 if I2C 34 select SND_SOC_DA7210 if I2C
35 select SND_SOC_DFBMCS320 35 select SND_SOC_DFBMCS320
36 select SND_SOC_JZ4740_CODEC if SOC_JZ4740 36 select SND_SOC_JZ4740_CODEC
37 select SND_SOC_LM4857 if I2C 37 select SND_SOC_LM4857 if I2C
38 select SND_SOC_MAX98088 if I2C 38 select SND_SOC_MAX98088 if I2C
39 select SND_SOC_MAX98095 if I2C 39 select SND_SOC_MAX98095 if I2C
diff --git a/sound/soc/codecs/ad1836.h b/sound/soc/codecs/ad1836.h
index 444747f0db26..dd7be0dbbc58 100644
--- a/sound/soc/codecs/ad1836.h
+++ b/sound/soc/codecs/ad1836.h
@@ -34,7 +34,7 @@
34 34
35#define AD1836_ADC_CTRL2 13 35#define AD1836_ADC_CTRL2 13
36#define AD1836_ADC_WORD_LEN_MASK 0x30 36#define AD1836_ADC_WORD_LEN_MASK 0x30
37#define AD1836_ADC_WORD_OFFSET 5 37#define AD1836_ADC_WORD_OFFSET 4
38#define AD1836_ADC_SERFMT_MASK (7 << 6) 38#define AD1836_ADC_SERFMT_MASK (7 << 6)
39#define AD1836_ADC_SERFMT_PCK256 (0x4 << 6) 39#define AD1836_ADC_SERFMT_PCK256 (0x4 << 6)
40#define AD1836_ADC_SERFMT_PCK128 (0x5 << 6) 40#define AD1836_ADC_SERFMT_PCK128 (0x5 << 6)
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index 1ccf8dd47576..45c63028b40d 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -245,7 +245,7 @@ static const char *adau1373_bass_hpf_cutoff_text[] = {
245}; 245};
246 246
247static const unsigned int adau1373_bass_tlv[] = { 247static const unsigned int adau1373_bass_tlv[] = {
248 TLV_DB_RANGE_HEAD(4), 248 TLV_DB_RANGE_HEAD(3),
249 0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1), 249 0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1),
250 3, 4, TLV_DB_SCALE_ITEM(950, 250, 0), 250 3, 4, TLV_DB_SCALE_ITEM(950, 250, 0),
251 5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0), 251 5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0),
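
This adau1373 hunk is the first of several in this series (rt5631, sgtl5000, wm8962, wm8993, wm9090, wm_hubs) that correct the count passed to TLV_DB_RANGE_HEAD(): it must equal the number of range entries that follow, otherwise user space walks past the end of the array when parsing the TLV data. A correctly sized table, using the bass values from the hunk above:

#include <sound/tlv.h>

/* three ranges follow, so the head declares exactly three */
static const unsigned int example_bass_tlv[] = {
	TLV_DB_RANGE_HEAD(3),
	0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1),
	3, 4, TLV_DB_SCALE_ITEM(950, 250, 0),
	5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0),
};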
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index f1f237ecec2a..73f46eb459f1 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -601,7 +601,6 @@ static int cs4270_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
601static int cs4270_soc_resume(struct snd_soc_codec *codec) 601static int cs4270_soc_resume(struct snd_soc_codec *codec)
602{ 602{
603 struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec); 603 struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
604 struct i2c_client *i2c_client = to_i2c_client(codec->dev);
605 int reg; 604 int reg;
606 605
607 regulator_bulk_enable(ARRAY_SIZE(cs4270->supplies), 606 regulator_bulk_enable(ARRAY_SIZE(cs4270->supplies),
@@ -612,14 +611,7 @@ static int cs4270_soc_resume(struct snd_soc_codec *codec)
612 ndelay(500); 611 ndelay(500);
613 612
614 /* first restore the entire register cache ... */ 613 /* first restore the entire register cache ... */
615 for (reg = CS4270_FIRSTREG; reg <= CS4270_LASTREG; reg++) { 614 snd_soc_cache_sync(codec);
616 u8 val = snd_soc_read(codec, reg);
617
618 if (i2c_smbus_write_byte_data(i2c_client, reg, val)) {
619 dev_err(codec->dev, "i2c write failed\n");
620 return -EIO;
621 }
622 }
623 615
624 /* ... then disable the power-down bits */ 616 /* ... then disable the power-down bits */
625 reg = snd_soc_read(codec, CS4270_PWRCTL); 617 reg = snd_soc_read(codec, CS4270_PWRCTL);
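
The cs4270 resume path above stops replaying the register cache by hand over raw i2c_smbus writes and instead asks the ASoC core to do it with snd_soc_cache_sync(), which walks the cache through the codec's normal I/O path. A hedged sketch of the resulting resume shape; the regulator handling from the hunk is elided and the power-down mask name is an assumption.

static int example_codec_resume(struct snd_soc_codec *codec)
{
	int reg;

	/* first restore the entire register cache through the core ... */
	snd_soc_cache_sync(codec);

	/* ... then clear the power-down bits, as the driver did before
	 * (CS4270_PWRCTL_PDN_ALL is an assumed mask name) */
	reg = snd_soc_read(codec, CS4270_PWRCTL);
	reg &= ~CS4270_PWRCTL_PDN_ALL;
	return snd_soc_write(codec, CS4270_PWRCTL, reg);
}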
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 23d1bd5dadda..69fde1506fe1 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -434,7 +434,8 @@ static int cs4271_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
434{ 434{
435 int ret; 435 int ret;
436 /* Set power-down bit */ 436 /* Set power-down bit */
437 ret = snd_soc_update_bits(codec, CS4271_MODE2, 0, CS4271_MODE2_PDN); 437 ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN,
438 CS4271_MODE2_PDN);
438 if (ret < 0) 439 if (ret < 0)
439 return ret; 440 return ret;
440 return 0; 441 return 0;
@@ -501,8 +502,9 @@ static int cs4271_probe(struct snd_soc_codec *codec)
501 return ret; 502 return ret;
502 } 503 }
503 504
504 ret = snd_soc_update_bits(codec, CS4271_MODE2, 0, 505 ret = snd_soc_update_bits(codec, CS4271_MODE2,
505 CS4271_MODE2_PDN | CS4271_MODE2_CPEN); 506 CS4271_MODE2_PDN | CS4271_MODE2_CPEN,
507 CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
506 if (ret < 0) 508 if (ret < 0)
507 return ret; 509 return ret;
508 ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, 0); 510 ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, 0);
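
Both cs4271 fixes above come down to the snd_soc_update_bits(codec, reg, mask, value) contract: only bits set in the mask can change, so the old calls with a mask of 0 never actually set the power-down or control-port-enable bits. A tiny standalone model of the read-modify-write it performs:

#include <stdio.h>

/* Model of the register update: only bits inside 'mask' are replaced. */
static unsigned int update_bits(unsigned int old, unsigned int mask,
				unsigned int value)
{
	return (old & ~mask) | (value & mask);
}

int main(void)
{
	const unsigned int PDN = 0x01;	/* stand-in for CS4271_MODE2_PDN */

	/* old call shape: mask 0, so nothing can ever change */
	printf("mask=0:   %#x\n", update_bits(0, 0, PDN));	/* prints 0 */
	/* fixed call shape: the bit is named in both mask and value */
	printf("mask=PDN: %#x\n", update_bits(0, PDN, PDN));	/* prints 0x1 */
	return 0;
}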
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index 8c3c8205d19e..1ee66361f61b 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -555,7 +555,7 @@ static int cs42l51_probe(struct snd_soc_codec *codec)
555 555
556static struct snd_soc_codec_driver soc_codec_device_cs42l51 = { 556static struct snd_soc_codec_driver soc_codec_device_cs42l51 = {
557 .probe = cs42l51_probe, 557 .probe = cs42l51_probe,
558 .reg_cache_size = CS42L51_NUMREGS, 558 .reg_cache_size = CS42L51_NUMREGS + 1,
559 .reg_word_size = sizeof(u8), 559 .reg_word_size = sizeof(u8),
560}; 560};
561 561
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index e373f8f06907..3e1f4e172bfb 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/io.h>
18 19
19#include <linux/delay.h> 20#include <linux/delay.h>
20 21
diff --git a/sound/soc/codecs/max9877.c b/sound/soc/codecs/max9877.c
index 9e7e964a5fa3..dcf6f2a1600a 100644
--- a/sound/soc/codecs/max9877.c
+++ b/sound/soc/codecs/max9877.c
@@ -106,13 +106,13 @@ static int max9877_set_2reg(struct snd_kcontrol *kcontrol,
106 unsigned int mask = mc->max; 106 unsigned int mask = mc->max;
107 unsigned int val = (ucontrol->value.integer.value[0] & mask); 107 unsigned int val = (ucontrol->value.integer.value[0] & mask);
108 unsigned int val2 = (ucontrol->value.integer.value[1] & mask); 108 unsigned int val2 = (ucontrol->value.integer.value[1] & mask);
109 unsigned int change = 1; 109 unsigned int change = 0;
110 110
111 if (((max9877_regs[reg] >> shift) & mask) == val) 111 if (((max9877_regs[reg] >> shift) & mask) != val)
112 change = 0; 112 change = 1;
113 113
114 if (((max9877_regs[reg2] >> shift) & mask) == val2) 114 if (((max9877_regs[reg2] >> shift) & mask) != val2)
115 change = 0; 115 change = 1;
116 116
117 if (change) { 117 if (change) {
118 max9877_regs[reg] &= ~(mask << shift); 118 max9877_regs[reg] &= ~(mask << shift);
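
The max9877 fix above inverts the change detection in the control's put handler: previously a match on either register field cleared the change flag, so an update that touched only one of the two fields was silently skipped; now the flag starts at zero and is set as soon as either field differs. The corrected predicate, reduced to a standalone helper:

#include <stdbool.h>

/* report a change when at least one of the two fields differs */
static bool double_reg_changed(unsigned int cur1, unsigned int val1,
			       unsigned int cur2, unsigned int val2)
{
	bool change = false;

	if (cur1 != val1)
		change = true;
	if (cur2 != val2)
		change = true;

	return change;
}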
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 27a078cbb6eb..4646e808b90a 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -177,7 +177,7 @@ static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -95625, 375, 0);
177static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); 177static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
178/* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */ 178/* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */
179static unsigned int mic_bst_tlv[] = { 179static unsigned int mic_bst_tlv[] = {
180 TLV_DB_RANGE_HEAD(6), 180 TLV_DB_RANGE_HEAD(7),
181 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), 181 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
182 1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0), 182 1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
183 2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0), 183 2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index d15695d1c273..bbcf921166f7 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -365,7 +365,7 @@ static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0);
365 365
366/* tlv for mic gain, 0db 20db 30db 40db */ 366/* tlv for mic gain, 0db 20db 30db 40db */
367static const unsigned int mic_gain_tlv[] = { 367static const unsigned int mic_gain_tlv[] = {
368 TLV_DB_RANGE_HEAD(4), 368 TLV_DB_RANGE_HEAD(2),
369 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), 369 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
370 1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0), 370 1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
371}; 371};
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index bb82408ab8e1..d2f37152f940 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -76,6 +76,8 @@ struct sta32x_priv {
76 76
77 unsigned int mclk; 77 unsigned int mclk;
78 unsigned int format; 78 unsigned int format;
79
80 u32 coef_shadow[STA32X_COEF_COUNT];
79}; 81};
80 82
81static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12700, 50, 1); 83static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12700, 50, 1);
@@ -227,6 +229,7 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
227 struct snd_ctl_elem_value *ucontrol) 229 struct snd_ctl_elem_value *ucontrol)
228{ 230{
229 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 231 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
232 struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
230 int numcoef = kcontrol->private_value >> 16; 233 int numcoef = kcontrol->private_value >> 16;
231 int index = kcontrol->private_value & 0xffff; 234 int index = kcontrol->private_value & 0xffff;
232 unsigned int cfud; 235 unsigned int cfud;
@@ -239,6 +242,11 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
239 snd_soc_write(codec, STA32X_CFUD, cfud); 242 snd_soc_write(codec, STA32X_CFUD, cfud);
240 243
241 snd_soc_write(codec, STA32X_CFADDR2, index); 244 snd_soc_write(codec, STA32X_CFADDR2, index);
245 for (i = 0; i < numcoef && (index + i < STA32X_COEF_COUNT); i++)
246 sta32x->coef_shadow[index + i] =
247 (ucontrol->value.bytes.data[3 * i] << 16)
248 | (ucontrol->value.bytes.data[3 * i + 1] << 8)
249 | (ucontrol->value.bytes.data[3 * i + 2]);
242 for (i = 0; i < 3 * numcoef; i++) 250 for (i = 0; i < 3 * numcoef; i++)
243 snd_soc_write(codec, STA32X_B1CF1 + i, 251 snd_soc_write(codec, STA32X_B1CF1 + i,
244 ucontrol->value.bytes.data[i]); 252 ucontrol->value.bytes.data[i]);
@@ -252,6 +260,48 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
252 return 0; 260 return 0;
253} 261}
254 262
263int sta32x_sync_coef_shadow(struct snd_soc_codec *codec)
264{
265 struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
266 unsigned int cfud;
267 int i;
268
269 /* preserve reserved bits in STA32X_CFUD */
270 cfud = snd_soc_read(codec, STA32X_CFUD) & 0xf0;
271
272 for (i = 0; i < STA32X_COEF_COUNT; i++) {
273 snd_soc_write(codec, STA32X_CFADDR2, i);
274 snd_soc_write(codec, STA32X_B1CF1,
275 (sta32x->coef_shadow[i] >> 16) & 0xff);
276 snd_soc_write(codec, STA32X_B1CF2,
277 (sta32x->coef_shadow[i] >> 8) & 0xff);
278 snd_soc_write(codec, STA32X_B1CF3,
279 (sta32x->coef_shadow[i]) & 0xff);
280 /* chip documentation does not say if the bits are
281 * self-clearing, so do it explicitly */
282 snd_soc_write(codec, STA32X_CFUD, cfud);
283 snd_soc_write(codec, STA32X_CFUD, cfud | 0x01);
284 }
285 return 0;
286}
287
288int sta32x_cache_sync(struct snd_soc_codec *codec)
289{
290 unsigned int mute;
291 int rc;
292
293 if (!codec->cache_sync)
294 return 0;
295
296 /* mute during register sync */
297 mute = snd_soc_read(codec, STA32X_MMUTE);
298 snd_soc_write(codec, STA32X_MMUTE, mute | STA32X_MMUTE_MMUTE);
299 sta32x_sync_coef_shadow(codec);
300 rc = snd_soc_cache_sync(codec);
301 snd_soc_write(codec, STA32X_MMUTE, mute);
302 return rc;
303}
304
255#define SINGLE_COEF(xname, index) \ 305#define SINGLE_COEF(xname, index) \
256{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ 306{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
257 .info = sta32x_coefficient_info, \ 307 .info = sta32x_coefficient_info, \
@@ -661,7 +711,7 @@ static int sta32x_set_bias_level(struct snd_soc_codec *codec,
661 return ret; 711 return ret;
662 } 712 }
663 713
664 snd_soc_cache_sync(codec); 714 sta32x_cache_sync(codec);
665 } 715 }
666 716
667 /* Power up to mute */ 717 /* Power up to mute */
@@ -790,6 +840,17 @@ static int sta32x_probe(struct snd_soc_codec *codec)
790 STA32X_CxCFG_OM_MASK, 840 STA32X_CxCFG_OM_MASK,
791 2 << STA32X_CxCFG_OM_SHIFT); 841 2 << STA32X_CxCFG_OM_SHIFT);
792 842
843 /* initialize coefficient shadow RAM with reset values */
844 for (i = 4; i <= 49; i += 5)
845 sta32x->coef_shadow[i] = 0x400000;
846 for (i = 50; i <= 54; i++)
847 sta32x->coef_shadow[i] = 0x7fffff;
848 sta32x->coef_shadow[55] = 0x5a9df7;
849 sta32x->coef_shadow[56] = 0x7fffff;
850 sta32x->coef_shadow[59] = 0x7fffff;
851 sta32x->coef_shadow[60] = 0x400000;
852 sta32x->coef_shadow[61] = 0x400000;
853
793 sta32x_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 854 sta32x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
794 /* Bias level configuration will have done an extra enable */ 855 /* Bias level configuration will have done an extra enable */
795 regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies); 856 regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies);
diff --git a/sound/soc/codecs/sta32x.h b/sound/soc/codecs/sta32x.h
index b97ee5a75667..d8e32a6262ee 100644
--- a/sound/soc/codecs/sta32x.h
+++ b/sound/soc/codecs/sta32x.h
@@ -19,6 +19,7 @@
19/* STA326 register addresses */ 19/* STA326 register addresses */
20 20
21#define STA32X_REGISTER_COUNT 0x2d 21#define STA32X_REGISTER_COUNT 0x2d
22#define STA32X_COEF_COUNT 62
22 23
23#define STA32X_CONFA 0x00 24#define STA32X_CONFA 0x00
24#define STA32X_CONFB 0x01 25#define STA32X_CONFB 0x01
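
The sta32x changes above add a 62-entry shadow of the coefficient RAM: every coefficient written through the control interface is mirrored in driver memory, and sta32x_cache_sync() replays that shadow (under mute) before the normal register-cache sync, since the coefficient RAM sits outside the ASoC register cache and would otherwise be lost across a power cycle. The coefficients are 24-bit values carried as three bytes; a small standalone round-trip of that packing:

#include <stdint.h>
#include <stdio.h>

/* three control bytes -> one 24-bit value (as stored in coef_shadow[]) */
static uint32_t pack24(const uint8_t b[3])
{
	return (b[0] << 16) | (b[1] << 8) | b[2];
}

/* and back again, as written to the B1CF1..B1CF3 registers on resume */
static void unpack24(uint32_t v, uint8_t b[3])
{
	b[0] = (v >> 16) & 0xff;
	b[1] = (v >> 8) & 0xff;
	b[2] = v & 0xff;
}

int main(void)
{
	uint8_t in[3] = { 0x40, 0x00, 0x00 };	/* 0x400000, one of the reset values */
	uint8_t out[3];

	unpack24(pack24(in), out);
	printf("%02x %02x %02x\n", out[0], out[1], out[2]);
	return 0;
}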
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index c5ca8cfea60f..0441893e270e 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -863,13 +863,13 @@ static struct i2c_driver uda1380_i2c_driver = {
863 863
864static int __init uda1380_modinit(void) 864static int __init uda1380_modinit(void)
865{ 865{
866 int ret; 866 int ret = 0;
867#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 867#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
868 ret = i2c_add_driver(&uda1380_i2c_driver); 868 ret = i2c_add_driver(&uda1380_i2c_driver);
869 if (ret != 0) 869 if (ret != 0)
870 pr_err("Failed to register UDA1380 I2C driver: %d\n", ret); 870 pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
871#endif 871#endif
872 return 0; 872 return ret;
873} 873}
874module_init(uda1380_modinit); 874module_init(uda1380_modinit);
875 875
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 7e5ec03f6f8d..a7c9ae17fc7e 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -453,6 +453,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
453 snd_soc_write(codec, WM8731_PWR, 0xffff); 453 snd_soc_write(codec, WM8731_PWR, 0xffff);
454 regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), 454 regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
455 wm8731->supplies); 455 wm8731->supplies);
456 codec->cache_sync = 1;
456 break; 457 break;
457 } 458 }
458 codec->dapm.bias_level = level; 459 codec->dapm.bias_level = level;
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index a9504710bb69..3a629d0d690e 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -190,6 +190,9 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
190 struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec); 190 struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
191 u16 ioctl; 191 u16 ioctl;
192 192
193 if (wm8753->dai_func == ucontrol->value.integer.value[0])
194 return 0;
195
193 if (codec->active) 196 if (codec->active)
194 return -EBUSY; 197 return -EBUSY;
195 198
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 0293763debe5..5a14d5c0e0e1 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -60,6 +60,8 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
60 } 60 }
61 61
62 if (memcmp(fw->data, "WMFW", 4) != 0) { 62 if (memcmp(fw->data, "WMFW", 4) != 0) {
63 memcpy(&data32, fw->data, sizeof(data32));
64 data32 = be32_to_cpu(data32);
63 dev_err(codec->dev, "%s: firmware has bad file magic %08x\n", 65 dev_err(codec->dev, "%s: firmware has bad file magic %08x\n",
64 name, data32); 66 name, data32);
65 goto err; 67 goto err;
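
The wm8958-dsp2 hunk above makes the "bad file magic" error message trustworthy: the message used to print a data32 value that had not yet been loaded from the file, so the fix copies the first four bytes out of the firmware blob (memcpy() avoids any unaligned dereference) and byte-swaps them from the on-disk big-endian layout before logging. The same pattern as a standalone helper, with ntohl() standing in for the kernel's be32_to_cpu():

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* read a big-endian 32-bit magic from a possibly unaligned buffer */
static uint32_t read_be32(const void *buf)
{
	uint32_t v;

	memcpy(&v, buf, sizeof(v));	/* no unaligned dereference */
	return ntohl(v);		/* kernel code uses be32_to_cpu() */
}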
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 91d3c6dbeba3..53edd9a8c758 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1973,7 +1973,7 @@ static int wm8962_reset(struct snd_soc_codec *codec)
1973static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0); 1973static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
1974static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0); 1974static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0);
1975static const unsigned int mixinpga_tlv[] = { 1975static const unsigned int mixinpga_tlv[] = {
1976 TLV_DB_RANGE_HEAD(7), 1976 TLV_DB_RANGE_HEAD(5),
1977 0, 1, TLV_DB_SCALE_ITEM(0, 600, 0), 1977 0, 1, TLV_DB_SCALE_ITEM(0, 600, 0),
1978 2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0), 1978 2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0),
1979 3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0), 1979 3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0),
@@ -1988,7 +1988,7 @@ static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
1988static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1); 1988static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
1989static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0); 1989static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0);
1990static const unsigned int classd_tlv[] = { 1990static const unsigned int classd_tlv[] = {
1991 TLV_DB_RANGE_HEAD(7), 1991 TLV_DB_RANGE_HEAD(2),
1992 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0), 1992 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
1993 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0), 1993 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
1994}; 1994};
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index eec8e1435116..d1a142f48b09 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -512,7 +512,7 @@ static const DECLARE_TLV_DB_SCALE(drc_comp_threash, -4500, 75, 0);
512static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0); 512static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0);
513static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0); 513static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
514static const unsigned int drc_max_tlv[] = { 514static const unsigned int drc_max_tlv[] = {
515 TLV_DB_RANGE_HEAD(4), 515 TLV_DB_RANGE_HEAD(2),
516 0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0), 516 0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
517 3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0), 517 3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
518}; 518};
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 6b73efd26991..d0c545b73d78 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -56,7 +56,7 @@ static int wm8994_retune_mobile_base[] = {
56static int wm8994_readable(struct snd_soc_codec *codec, unsigned int reg) 56static int wm8994_readable(struct snd_soc_codec *codec, unsigned int reg)
57{ 57{
58 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); 58 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
59 struct wm8994 *control = wm8994->control_data; 59 struct wm8994 *control = codec->control_data;
60 60
61 switch (reg) { 61 switch (reg) {
62 case WM8994_GPIO_1: 62 case WM8994_GPIO_1:
@@ -1325,15 +1325,15 @@ SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
1325}; 1325};
1326 1326
1327static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = { 1327static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
1328SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux, 1328SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
1329 adc_mux_ev, SND_SOC_DAPM_PRE_PMU), 1329 adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
1330SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux, 1330SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
1331 adc_mux_ev, SND_SOC_DAPM_PRE_PMU), 1331 adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
1332}; 1332};
1333 1333
1334static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = { 1334static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
1335SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), 1335SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
1336SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), 1336SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
1337}; 1337};
1338 1338
1339static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { 1339static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
@@ -2357,6 +2357,11 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
2357 bclk |= best << WM8994_AIF1_BCLK_DIV_SHIFT; 2357 bclk |= best << WM8994_AIF1_BCLK_DIV_SHIFT;
2358 2358
2359 lrclk = bclk_rate / params_rate(params); 2359 lrclk = bclk_rate / params_rate(params);
2360 if (!lrclk) {
2361 dev_err(dai->dev, "Unable to generate LRCLK from %dHz BCLK\n",
2362 bclk_rate);
2363 return -EINVAL;
2364 }
2360 dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n", 2365 dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n",
2361 lrclk, bclk_rate / lrclk); 2366 lrclk, bclk_rate / lrclk);
2362 2367
@@ -3030,19 +3035,34 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
3030{ 3035{
3031 struct wm8994_priv *wm8994 = data; 3036 struct wm8994_priv *wm8994 = data;
3032 struct snd_soc_codec *codec = wm8994->codec; 3037 struct snd_soc_codec *codec = wm8994->codec;
3033 int reg; 3038 int reg, count;
3034 3039
3035 reg = snd_soc_read(codec, WM8958_MIC_DETECT_3); 3040 /* We may occasionally read a detection without an impedence
3036 if (reg < 0) { 3041 * range being provided - if that happens loop again.
3037 dev_err(codec->dev, "Failed to read mic detect status: %d\n", 3042 */
3038 reg); 3043 count = 10;
3039 return IRQ_NONE; 3044 do {
3040 } 3045 reg = snd_soc_read(codec, WM8958_MIC_DETECT_3);
3046 if (reg < 0) {
3047 dev_err(codec->dev,
3048 "Failed to read mic detect status: %d\n",
3049 reg);
3050 return IRQ_NONE;
3051 }
3041 3052
3042 if (!(reg & WM8958_MICD_VALID)) { 3053 if (!(reg & WM8958_MICD_VALID)) {
3043 dev_dbg(codec->dev, "Mic detect data not valid\n"); 3054 dev_dbg(codec->dev, "Mic detect data not valid\n");
3044 goto out; 3055 goto out;
3045 } 3056 }
3057
3058 if (!(reg & WM8958_MICD_STS) || (reg & WM8958_MICD_LVL_MASK))
3059 break;
3060
3061 msleep(1);
3062 } while (count--);
3063
3064 if (count == 0)
3065 dev_warn(codec->dev, "No impedence range reported for jack\n");
3046 3066
3047#ifndef CONFIG_SND_SOC_WM8994_MODULE 3067#ifndef CONFIG_SND_SOC_WM8994_MODULE
3048 trace_snd_soc_jack_irq(dev_name(codec->dev)); 3068 trace_snd_soc_jack_irq(dev_name(codec->dev));
@@ -3163,6 +3183,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
3163 switch (wm8994->revision) { 3183 switch (wm8994->revision) {
3164 case 0: 3184 case 0:
3165 case 1: 3185 case 1:
3186 case 2:
3187 case 3:
3166 wm8994->hubs.dcs_codes_l = -9; 3188 wm8994->hubs.dcs_codes_l = -9;
3167 wm8994->hubs.dcs_codes_r = -5; 3189 wm8994->hubs.dcs_codes_r = -5;
3168 break; 3190 break;
@@ -3180,9 +3202,9 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
3180 3202
3181 wm8994_request_irq(codec->control_data, WM8994_IRQ_FIFOS_ERR, 3203 wm8994_request_irq(codec->control_data, WM8994_IRQ_FIFOS_ERR,
3182 wm8994_fifo_error, "FIFO error", codec); 3204 wm8994_fifo_error, "FIFO error", codec);
3183 wm8994_request_irq(wm8994->control_data, WM8994_IRQ_TEMP_WARN, 3205 wm8994_request_irq(codec->control_data, WM8994_IRQ_TEMP_WARN,
3184 wm8994_temp_warn, "Thermal warning", codec); 3206 wm8994_temp_warn, "Thermal warning", codec);
3185 wm8994_request_irq(wm8994->control_data, WM8994_IRQ_TEMP_SHUT, 3207 wm8994_request_irq(codec->control_data, WM8994_IRQ_TEMP_SHUT,
3186 wm8994_temp_shut, "Thermal shutdown", codec); 3208 wm8994_temp_shut, "Thermal shutdown", codec);
3187 3209
3188 ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_DCS_DONE, 3210 ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_DCS_DONE,
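
Among the wm8994 changes above, the wm8994_hw_params() hunk adds a guard around the LRCLK divider: lrclk is computed as bclk_rate / rate, so a BCLK below the sample rate would make it zero and the very next debug print (bclk_rate / lrclk) would divide by zero. Failing early with -EINVAL is the whole fix; a standalone sketch of the same check:

#include <errno.h>

/* derive the BCLK-to-LRCLK divider, refusing rates that cannot work */
static int pick_lrclk_div(unsigned int bclk_rate, unsigned int sample_rate)
{
	unsigned int lrclk = bclk_rate / sample_rate;

	if (!lrclk)
		return -EINVAL;		/* BCLK too slow to generate LRCLK */

	return lrclk;
}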
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 645c980d6b80..a33b04d17195 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -1968,6 +1968,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
1968 break; 1968 break;
1969 case 24576000: 1969 case 24576000:
1970 ratediv = WM8996_SYSCLK_DIV; 1970 ratediv = WM8996_SYSCLK_DIV;
1971 wm8996->sysclk /= 2;
1971 case 12288000: 1972 case 12288000:
1972 snd_soc_update_bits(codec, WM8996_AIF_RATE, 1973 snd_soc_update_bits(codec, WM8996_AIF_RATE,
1973 WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE); 1974 WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 3cd35a02c28c..4a398c3bfe84 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -807,7 +807,6 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
807 mdelay(100); 807 mdelay(100);
808 808
809 /* Normal bias enable & soft start off */ 809 /* Normal bias enable & soft start off */
810 reg |= WM9081_BIAS_ENA;
811 reg &= ~WM9081_VMID_RAMP; 810 reg &= ~WM9081_VMID_RAMP;
812 snd_soc_write(codec, WM9081_VMID_CONTROL, reg); 811 snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
813 812
@@ -818,7 +817,7 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
818 } 817 }
819 818
820 /* VMID 2*240k */ 819 /* VMID 2*240k */
821 reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1); 820 reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
822 reg &= ~WM9081_VMID_SEL_MASK; 821 reg &= ~WM9081_VMID_SEL_MASK;
823 reg |= 0x04; 822 reg |= 0x04;
824 snd_soc_write(codec, WM9081_VMID_CONTROL, reg); 823 snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
@@ -830,14 +829,15 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
830 break; 829 break;
831 830
832 case SND_SOC_BIAS_OFF: 831 case SND_SOC_BIAS_OFF:
833 /* Startup bias source */ 832 /* Startup bias source and disable bias */
834 reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1); 833 reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
835 reg |= WM9081_BIAS_SRC; 834 reg |= WM9081_BIAS_SRC;
835 reg &= ~WM9081_BIAS_ENA;
836 snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg); 836 snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
837 837
838 /* Disable VMID and biases with soft ramping */ 838 /* Disable VMID with soft ramping */
839 reg = snd_soc_read(codec, WM9081_VMID_CONTROL); 839 reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
840 reg &= ~(WM9081_VMID_SEL_MASK | WM9081_BIAS_ENA); 840 reg &= ~WM9081_VMID_SEL_MASK;
841 reg |= WM9081_VMID_RAMP; 841 reg |= WM9081_VMID_RAMP;
842 snd_soc_write(codec, WM9081_VMID_CONTROL, reg); 842 snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
843 843
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 2b5252c9e377..f94c06057c64 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -177,19 +177,19 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec)
177} 177}
178 178
179static const unsigned int in_tlv[] = { 179static const unsigned int in_tlv[] = {
180 TLV_DB_RANGE_HEAD(6), 180 TLV_DB_RANGE_HEAD(3),
181 0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0), 181 0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0),
182 1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0), 182 1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0),
183 4, 6, TLV_DB_SCALE_ITEM(600, 600, 0), 183 4, 6, TLV_DB_SCALE_ITEM(600, 600, 0),
184}; 184};
185static const unsigned int mix_tlv[] = { 185static const unsigned int mix_tlv[] = {
186 TLV_DB_RANGE_HEAD(4), 186 TLV_DB_RANGE_HEAD(2),
187 0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0), 187 0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0),
188 3, 3, TLV_DB_SCALE_ITEM(0, 0, 0), 188 3, 3, TLV_DB_SCALE_ITEM(0, 0, 0),
189}; 189};
190static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0); 190static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
191static const unsigned int spkboost_tlv[] = { 191static const unsigned int spkboost_tlv[] = {
192 TLV_DB_RANGE_HEAD(7), 192 TLV_DB_RANGE_HEAD(2),
193 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0), 193 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
194 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0), 194 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
195}; 195};
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 84f33d4ea2cd..48e61e912400 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -40,7 +40,7 @@ static const DECLARE_TLV_DB_SCALE(outmix_tlv, -2100, 300, 0);
40static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1); 40static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1);
41static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0); 41static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0);
42static const unsigned int spkboost_tlv[] = { 42static const unsigned int spkboost_tlv[] = {
43 TLV_DB_RANGE_HEAD(7), 43 TLV_DB_RANGE_HEAD(2),
44 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0), 44 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
45 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0), 45 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
46}; 46};
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 0268cf989736..83c4bd5b2dd7 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -694,6 +694,7 @@ static int __devinit fsl_ssi_probe(struct platform_device *pdev)
694 694
695 /* Initialize the the device_attribute structure */ 695 /* Initialize the the device_attribute structure */
696 dev_attr = &ssi_private->dev_attr; 696 dev_attr = &ssi_private->dev_attr;
697 sysfs_attr_init(&dev_attr->attr);
697 dev_attr->attr.name = "statistics"; 698 dev_attr->attr.name = "statistics";
698 dev_attr->attr.mode = S_IRUGO; 699 dev_attr->attr.mode = S_IRUGO;
699 dev_attr->show = fsl_sysfs_ssi_show; 700 dev_attr->show = fsl_sysfs_ssi_show;
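
The fsl_ssi hunk above adds sysfs_attr_init() on the dynamically allocated "statistics" attribute; the call sets up the lockdep key that sysfs expects every attribute to carry, and skipping it for a runtime-built attribute triggers lockdep warnings at registration time. A sketch of the pattern, where 'priv', 'pdev', 'ret' and example_show() are placeholders rather than names from the driver:

	struct device_attribute *dev_attr = &priv->dev_attr;

	sysfs_attr_init(&dev_attr->attr);	/* lockdep key for a dynamic attribute */
	dev_attr->attr.name = "statistics";
	dev_attr->attr.mode = S_IRUGO;
	dev_attr->show = example_show;		/* placeholder show() callback */
	dev_attr->store = NULL;

	ret = device_create_file(&pdev->dev, dev_attr);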
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c
index 31af405bda84..ae49f1c78c6d 100644
--- a/sound/soc/fsl/mpc8610_hpcd.c
+++ b/sound/soc/fsl/mpc8610_hpcd.c
@@ -392,7 +392,8 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
392 } 392 }
393 393
394 if (strcasecmp(sprop, "i2s-slave") == 0) { 394 if (strcasecmp(sprop, "i2s-slave") == 0) {
395 machine_data->dai_format = SND_SOC_DAIFMT_I2S; 395 machine_data->dai_format =
396 SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM;
396 machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT; 397 machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
397 machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN; 398 machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
398 399
@@ -409,31 +410,38 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
409 } 410 }
410 machine_data->clk_frequency = be32_to_cpup(iprop); 411 machine_data->clk_frequency = be32_to_cpup(iprop);
411 } else if (strcasecmp(sprop, "i2s-master") == 0) { 412 } else if (strcasecmp(sprop, "i2s-master") == 0) {
412 machine_data->dai_format = SND_SOC_DAIFMT_I2S; 413 machine_data->dai_format =
414 SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS;
413 machine_data->codec_clk_direction = SND_SOC_CLOCK_IN; 415 machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
414 machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT; 416 machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
415 } else if (strcasecmp(sprop, "lj-slave") == 0) { 417 } else if (strcasecmp(sprop, "lj-slave") == 0) {
416 machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J; 418 machine_data->dai_format =
419 SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM;
417 machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT; 420 machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
418 machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN; 421 machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
419 } else if (strcasecmp(sprop, "lj-master") == 0) { 422 } else if (strcasecmp(sprop, "lj-master") == 0) {
420 machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J; 423 machine_data->dai_format =
424 SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBS_CFS;
421 machine_data->codec_clk_direction = SND_SOC_CLOCK_IN; 425 machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
422 machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT; 426 machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
423 } else if (strcasecmp(sprop, "rj-slave") == 0) { 427 } else if (strcasecmp(sprop, "rj-slave") == 0) {
424 machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J; 428 machine_data->dai_format =
429 SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBM_CFM;
425 machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT; 430 machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
426 machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN; 431 machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
427 } else if (strcasecmp(sprop, "rj-master") == 0) { 432 } else if (strcasecmp(sprop, "rj-master") == 0) {
428 machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J; 433 machine_data->dai_format =
434 SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBS_CFS;
429 machine_data->codec_clk_direction = SND_SOC_CLOCK_IN; 435 machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
430 machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT; 436 machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
431 } else if (strcasecmp(sprop, "ac97-slave") == 0) { 437 } else if (strcasecmp(sprop, "ac97-slave") == 0) {
432 machine_data->dai_format = SND_SOC_DAIFMT_AC97; 438 machine_data->dai_format =
439 SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBM_CFM;
433 machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT; 440 machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
434 machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN; 441 machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
435 } else if (strcasecmp(sprop, "ac97-master") == 0) { 442 } else if (strcasecmp(sprop, "ac97-master") == 0) {
436 machine_data->dai_format = SND_SOC_DAIFMT_AC97; 443 machine_data->dai_format =
444 SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBS_CFS;
437 machine_data->codec_clk_direction = SND_SOC_CLOCK_IN; 445 machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
438 machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT; 446 machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
439 } else { 447 } else {
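
The mpc8610_hpcd changes above stop relying on a bare format constant and OR the clock-mastering flag into dai_format for every mode string: SND_SOC_DAIFMT_CBM_CFM when the codec drives the bit and frame clocks (the "*-slave" strings, from the SSI's point of view) and SND_SOC_DAIFMT_CBS_CFS when the SSI drives them (the "*-master" strings). A hedged restatement of that mapping as a table; the driver itself keeps its strcasecmp() chain, this is only a compact way to read the same data.

#include <sound/soc.h>

/* one row per accepted mode string (sprop in the hunk); values as above */
struct dai_fmt_map {
	const char *mode;
	unsigned int dai_format;	/* format bits OR'd with the clock-master bits */
};

static const struct dai_fmt_map fmt_map[] = {
	{ "i2s-slave",   SND_SOC_DAIFMT_I2S     | SND_SOC_DAIFMT_CBM_CFM },
	{ "i2s-master",  SND_SOC_DAIFMT_I2S     | SND_SOC_DAIFMT_CBS_CFS },
	{ "lj-slave",    SND_SOC_DAIFMT_LEFT_J  | SND_SOC_DAIFMT_CBM_CFM },
	{ "lj-master",   SND_SOC_DAIFMT_LEFT_J  | SND_SOC_DAIFMT_CBS_CFS },
	{ "rj-slave",    SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBM_CFM },
	{ "rj-master",   SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBS_CFS },
	{ "ac97-slave",  SND_SOC_DAIFMT_AC97    | SND_SOC_DAIFMT_CBM_CFM },
	{ "ac97-master", SND_SOC_DAIFMT_AC97    | SND_SOC_DAIFMT_CBS_CFS },
};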
diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig
index b133bfcc5848..738391757f2c 100644
--- a/sound/soc/imx/Kconfig
+++ b/sound/soc/imx/Kconfig
@@ -28,7 +28,7 @@ config SND_MXC_SOC_WM1133_EV1
28 28
29config SND_SOC_MX27VIS_AIC32X4 29config SND_SOC_MX27VIS_AIC32X4
30 tristate "SoC audio support for Visstrim M10 boards" 30 tristate "SoC audio support for Visstrim M10 boards"
31 depends on MACH_IMX27_VISSTRIM_M10 31 depends on MACH_IMX27_VISSTRIM_M10 && I2C
32 select SND_SOC_TLV320AIC32X4 32 select SND_SOC_TLV320AIC32X4
33 select SND_MXC_SOC_MX2 33 select SND_MXC_SOC_MX2
34 help 34 help
diff --git a/sound/soc/kirkwood/Kconfig b/sound/soc/kirkwood/Kconfig
index 8f49e165f4d1..c62d715235e2 100644
--- a/sound/soc/kirkwood/Kconfig
+++ b/sound/soc/kirkwood/Kconfig
@@ -12,6 +12,7 @@ config SND_KIRKWOOD_SOC_I2S
12config SND_KIRKWOOD_SOC_OPENRD 12config SND_KIRKWOOD_SOC_OPENRD
13 tristate "SoC Audio support for Kirkwood Openrd Client" 13 tristate "SoC Audio support for Kirkwood Openrd Client"
14 depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE) 14 depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
15 depends on I2C
15 select SND_KIRKWOOD_SOC_I2S 16 select SND_KIRKWOOD_SOC_I2S
16 select SND_SOC_CS42L51 17 select SND_SOC_CS42L51
17 help 18 help
@@ -20,7 +21,7 @@ config SND_KIRKWOOD_SOC_OPENRD
20 21
21config SND_KIRKWOOD_SOC_T5325 22config SND_KIRKWOOD_SOC_T5325
22 tristate "SoC Audio support for HP t5325" 23 tristate "SoC Audio support for HP t5325"
23 depends on SND_KIRKWOOD_SOC && MACH_T5325 24 depends on SND_KIRKWOOD_SOC && MACH_T5325 && I2C
24 select SND_KIRKWOOD_SOC_I2S 25 select SND_KIRKWOOD_SOC_I2S
25 select SND_SOC_ALC5623 26 select SND_SOC_ALC5623
26 help 27 help
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c
index dea5aa4aa647..f39d7dd9fbcb 100644
--- a/sound/soc/mxs/mxs-pcm.c
+++ b/sound/soc/mxs/mxs-pcm.c
@@ -357,3 +357,6 @@ static void __exit snd_mxs_pcm_exit(void)
357 platform_driver_unregister(&mxs_pcm_driver); 357 platform_driver_unregister(&mxs_pcm_driver);
358} 358}
359module_exit(snd_mxs_pcm_exit); 359module_exit(snd_mxs_pcm_exit);
360
361MODULE_LICENSE("GPL");
362MODULE_ALIAS("platform:mxs-pcm-audio");
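
The two MODULE_* lines added above matter beyond cosmetics: MODULE_LICENSE("GPL") keeps the kernel from tainting itself (and from withholding GPL-only symbols) when the PCM driver is built as a module, and MODULE_ALIAS("platform:mxs-pcm-audio") lets udev/modprobe autoload the module when a platform device of that name is registered. A minimal sketch of where these macros usually sit in a platform driver; every name and the empty probe/remove bodies are illustrative, not taken from the tree:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        return 0;       /* claim the device; a real driver sets things up here */
}

static int example_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver example_driver = {
        .driver = {
                .name  = "example-audio",       /* must match the alias suffix */
                .owner = THIS_MODULE,
        },
        .probe  = example_probe,
        .remove = example_remove,
};

static int __init example_init(void)
{
        return platform_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
        platform_driver_unregister(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:example-audio"); /* autoload on a matching platform device */
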
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 7fbeaec06eb4..1c57f6630a48 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -171,3 +171,4 @@ module_exit(mxs_sgtl5000_exit);
171MODULE_AUTHOR("Freescale Semiconductor, Inc."); 171MODULE_AUTHOR("Freescale Semiconductor, Inc.");
172MODULE_DESCRIPTION("MXS ALSA SoC Machine driver"); 172MODULE_DESCRIPTION("MXS ALSA SoC Machine driver");
173MODULE_LICENSE("GPL"); 173MODULE_LICENSE("GPL");
174MODULE_ALIAS("platform:mxs-sgtl5000");
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index 9c0edad90d8b..a4e3237956e2 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -365,7 +365,8 @@ static int __devinit nuc900_ac97_drvprobe(struct platform_device *pdev)
365 if (ret) 365 if (ret)
366 goto out3; 366 goto out3;
367 367
368 mfp_set_groupg(nuc900_audio->dev); /* enbale ac97 multifunction pin*/ 368 /* enbale ac97 multifunction pin */
369 mfp_set_groupg(nuc900_audio->dev, "nuc900-audio");
369 370
370 return 0; 371 return 0;
371 372
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index ffd2242e305f..a0f7d3cfa470 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -151,6 +151,7 @@ config SND_SOC_ZYLONITE
151config SND_SOC_RAUMFELD 151config SND_SOC_RAUMFELD
152 tristate "SoC Audio support Raumfeld audio adapter" 152 tristate "SoC Audio support Raumfeld audio adapter"
153 depends on SND_PXA2XX_SOC && (MACH_RAUMFELD_SPEAKER || MACH_RAUMFELD_CONNECTOR) 153 depends on SND_PXA2XX_SOC && (MACH_RAUMFELD_SPEAKER || MACH_RAUMFELD_CONNECTOR)
154 depends on I2C && SPI_MASTER
154 select SND_PXA_SOC_SSP 155 select SND_PXA_SOC_SSP
155 select SND_SOC_CS4270 156 select SND_SOC_CS4270
156 select SND_SOC_AK4104 157 select SND_SOC_AK4104
@@ -159,7 +160,7 @@ config SND_SOC_RAUMFELD
159 160
160config SND_PXA2XX_SOC_HX4700 161config SND_PXA2XX_SOC_HX4700
161 tristate "SoC Audio support for HP iPAQ hx4700" 162 tristate "SoC Audio support for HP iPAQ hx4700"
162 depends on SND_PXA2XX_SOC && MACH_H4700 163 depends on SND_PXA2XX_SOC && MACH_H4700 && I2C
163 select SND_PXA2XX_SOC_I2S 164 select SND_PXA2XX_SOC_I2S
164 select SND_SOC_AK4641 165 select SND_SOC_AK4641
165 help 166 help
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index 65c124831a00..c664e33fb6d7 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -209,9 +209,10 @@ static int __devinit hx4700_audio_probe(struct platform_device *pdev)
209 snd_soc_card_hx4700.dev = &pdev->dev; 209 snd_soc_card_hx4700.dev = &pdev->dev;
210 ret = snd_soc_register_card(&snd_soc_card_hx4700); 210 ret = snd_soc_register_card(&snd_soc_card_hx4700);
211 if (ret) 211 if (ret)
212 return ret; 212 gpio_free_array(hx4700_audio_gpios,
213 ARRAY_SIZE(hx4700_audio_gpios));
213 214
214 return 0; 215 return ret;
215} 216}
216 217
217static int __devexit hx4700_audio_remove(struct platform_device *pdev) 218static int __devexit hx4700_audio_remove(struct platform_device *pdev)
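
The hx4700 change above fixes an error path: if snd_soc_register_card() fails, the GPIOs claimed earlier in probe are now released instead of being leaked on return. A hedged sketch of that request/rollback pairing with gpio_request_array()/gpio_free_array(); the GPIO numbers, labels, and the failing stand-in function are made up for illustration and do not come from the patch:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gpio.h>

static const struct gpio example_gpios[] = {
        { 92, GPIOF_OUT_INIT_HIGH, "example hp jack" },
        { 93, GPIOF_OUT_INIT_LOW,  "example spk amp" },
};

static int example_register_card(void)
{
        /* stand-in for snd_soc_register_card(); fails here for illustration */
        return -ENODEV;
}

static int example_audio_probe(void)
{
        int ret;

        ret = gpio_request_array(example_gpios, ARRAY_SIZE(example_gpios));
        if (ret)
                return ret;

        ret = example_register_card();
        if (ret)
                /* roll back the earlier request instead of leaking the GPIOs */
                gpio_free_array(example_gpios, ARRAY_SIZE(example_gpios));

        return ret;
}
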
diff --git a/sound/soc/samsung/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c
index 1826acf20f7c..8e523fd9189e 100644
--- a/sound/soc/samsung/jive_wm8750.c
+++ b/sound/soc/samsung/jive_wm8750.c
@@ -101,7 +101,6 @@ static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd)
101{ 101{
102 struct snd_soc_codec *codec = rtd->codec; 102 struct snd_soc_codec *codec = rtd->codec;
103 struct snd_soc_dapm_context *dapm = &codec->dapm; 103 struct snd_soc_dapm_context *dapm = &codec->dapm;
104 int err;
105 104
106 /* These endpoints are not being used. */ 105 /* These endpoints are not being used. */
107 snd_soc_dapm_nc_pin(dapm, "LINPUT2"); 106 snd_soc_dapm_nc_pin(dapm, "LINPUT2");
@@ -131,7 +130,7 @@ static struct snd_soc_card snd_soc_machine_jive = {
131 .dai_link = &jive_dai, 130 .dai_link = &jive_dai,
132 .num_links = 1, 131 .num_links = 1,
133 132
134 .dapm_widgtets = wm8750_dapm_widgets, 133 .dapm_widgets = wm8750_dapm_widgets,
135 .num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets), 134 .num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
136 .dapm_routes = audio_map, 135 .dapm_routes = audio_map,
137 .num_dapm_routes = ARRAY_SIZE(audio_map), 136 .num_dapm_routes = ARRAY_SIZE(audio_map),
diff --git a/sound/soc/samsung/smdk2443_wm9710.c b/sound/soc/samsung/smdk2443_wm9710.c
index 3a0dbfc793f0..8bd1dc5706bf 100644
--- a/sound/soc/samsung/smdk2443_wm9710.c
+++ b/sound/soc/samsung/smdk2443_wm9710.c
@@ -12,6 +12,7 @@
12 * 12 *
13 */ 13 */
14 14
15#include <linux/module.h>
15#include <sound/soc.h> 16#include <sound/soc.h>
16 17
17static struct snd_soc_card smdk2443; 18static struct snd_soc_card smdk2443;
diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c
index f75e43997d5b..ad9ac42522e2 100644
--- a/sound/soc/samsung/smdk_wm8994.c
+++ b/sound/soc/samsung/smdk_wm8994.c
@@ -9,6 +9,7 @@
9 9
10#include "../codecs/wm8994.h" 10#include "../codecs/wm8994.h"
11#include <sound/pcm_params.h> 11#include <sound/pcm_params.h>
12#include <linux/module.h>
12 13
13 /* 14 /*
14 * Default CFG switch settings to use this driver: 15 * Default CFG switch settings to use this driver:
diff --git a/sound/soc/samsung/speyside.c b/sound/soc/samsung/speyside.c
index 85bf541a771d..4b8e35410eb1 100644
--- a/sound/soc/samsung/speyside.c
+++ b/sound/soc/samsung/speyside.c
@@ -191,7 +191,7 @@ static int speyside_late_probe(struct snd_soc_card *card)
191 snd_soc_dapm_ignore_suspend(&card->dapm, "Headset Mic"); 191 snd_soc_dapm_ignore_suspend(&card->dapm, "Headset Mic");
192 snd_soc_dapm_ignore_suspend(&card->dapm, "Main AMIC"); 192 snd_soc_dapm_ignore_suspend(&card->dapm, "Main AMIC");
193 snd_soc_dapm_ignore_suspend(&card->dapm, "Main DMIC"); 193 snd_soc_dapm_ignore_suspend(&card->dapm, "Main DMIC");
194 snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker"); 194 snd_soc_dapm_ignore_suspend(&card->dapm, "Main Speaker");
195 snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Output"); 195 snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Output");
196 snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Input"); 196 snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Input");
197 197
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index a5d3685a5d38..a25fa63ce9a2 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -709,6 +709,12 @@ int snd_soc_resume(struct device *dev)
709 struct snd_soc_card *card = dev_get_drvdata(dev); 709 struct snd_soc_card *card = dev_get_drvdata(dev);
710 int i, ac97_control = 0; 710 int i, ac97_control = 0;
711 711
712 /* If the initialization of this soc device failed, there is no codec
713 * associated with it. Just bail out in this case.
714 */
715 if (list_empty(&card->codec_dev_list))
716 return 0;
717
712 /* AC97 devices might have other drivers hanging off them so 718 /* AC97 devices might have other drivers hanging off them so
713 * need to resume immediately. Other drivers don't have that 719 * need to resume immediately. Other drivers don't have that
714 * problem and may take a substantial amount of time to resume 720 * problem and may take a substantial amount of time to resume
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 0c12b98484bd..4220bb0f2730 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -58,7 +58,36 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params)
58} 58}
59EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk); 59EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk);
60 60
61static struct snd_soc_platform_driver dummy_platform; 61static const struct snd_pcm_hardware dummy_dma_hardware = {
62 .formats = 0xffffffff,
63 .channels_min = 1,
64 .channels_max = UINT_MAX,
65
66 /* Random values to keep userspace happy when checking constraints */
67 .info = SNDRV_PCM_INFO_INTERLEAVED |
68 SNDRV_PCM_INFO_BLOCK_TRANSFER,
69 .buffer_bytes_max = 128*1024,
70 .period_bytes_min = PAGE_SIZE,
71 .period_bytes_max = PAGE_SIZE*2,
72 .periods_min = 2,
73 .periods_max = 128,
74};
75
76static int dummy_dma_open(struct snd_pcm_substream *substream)
77{
78 snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
79
80 return 0;
81}
82
83static struct snd_pcm_ops dummy_dma_ops = {
84 .open = dummy_dma_open,
85 .ioctl = snd_pcm_lib_ioctl,
86};
87
88static struct snd_soc_platform_driver dummy_platform = {
89 .ops = &dummy_dma_ops,
90};
62 91
63static __devinit int snd_soc_dummy_probe(struct platform_device *pdev) 92static __devinit int snd_soc_dummy_probe(struct platform_device *pdev)
64{ 93{
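
With the hunk above, the dummy platform finally advertises a deliberately permissive snd_pcm_hardware description from its open() callback, which is what user space refines during hw_params negotiation. For context, the user-space side of that negotiation looks roughly like the alsa-lib sequence below; the device name "default" and the requested format/rate are arbitrary choices for the sketch, not anything mandated by the patch (build with -lasound):

#include <stdio.h>
#include <alsa/asoundlib.h>

int main(void)
{
        snd_pcm_t *pcm;
        snd_pcm_hw_params_t *hw;
        unsigned int rate = 48000;
        int err;

        err = snd_pcm_open(&pcm, "default", SND_PCM_STREAM_PLAYBACK, 0);
        if (err < 0) {
                fprintf(stderr, "open: %s\n", snd_strerror(err));
                return 1;
        }

        snd_pcm_hw_params_alloca(&hw);
        snd_pcm_hw_params_any(pcm, hw);         /* start from the driver's full space */

        /* each call narrows the space the driver advertised (errors ignored here) */
        snd_pcm_hw_params_set_access(pcm, hw, SND_PCM_ACCESS_RW_INTERLEAVED);
        snd_pcm_hw_params_set_format(pcm, hw, SND_PCM_FORMAT_S16_LE);
        snd_pcm_hw_params_set_channels(pcm, hw, 2);
        snd_pcm_hw_params_set_rate_near(pcm, hw, &rate, 0);

        err = snd_pcm_hw_params(pcm, hw);       /* commit the refined parameters */
        if (err < 0)
                fprintf(stderr, "hw_params: %s\n", snd_strerror(err));
        else
                printf("negotiated rate %u Hz\n", rate);

        snd_pcm_close(pcm);
        return err < 0;
}
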
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 60f65ace7474..ab23869c01bb 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -765,10 +765,61 @@ static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
765 * interface to ALSA control for feature/mixer units 765 * interface to ALSA control for feature/mixer units
766 */ 766 */
767 767
768/* volume control quirks */
769static void volume_control_quirks(struct usb_mixer_elem_info *cval,
770 struct snd_kcontrol *kctl)
771{
772 switch (cval->mixer->chip->usb_id) {
773 case USB_ID(0x0471, 0x0101):
774 case USB_ID(0x0471, 0x0104):
775 case USB_ID(0x0471, 0x0105):
776 case USB_ID(0x0672, 0x1041):
777 /* quirk for UDA1321/N101.
778 * note that detection between firmware 2.1.1.7 (N101)
779 * and later 2.1.1.21 is not very clear from datasheets.
780 * I hope that the min value is -15360 for newer firmware --jk
781 */
782 if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
783 cval->min == -15616) {
784 snd_printk(KERN_INFO
785 "set volume quirk for UDA1321/N101 chip\n");
786 cval->max = -256;
787 }
788 break;
789
790 case USB_ID(0x046d, 0x09a4):
791 if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
792 snd_printk(KERN_INFO
793 "set volume quirk for QuickCam E3500\n");
794 cval->min = 6080;
795 cval->max = 8768;
796 cval->res = 192;
797 }
798 break;
799
800 case USB_ID(0x046d, 0x0808):
801 case USB_ID(0x046d, 0x0809):
802 case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
803 case USB_ID(0x046d, 0x0991):
804 /* Most audio usb devices lie about volume resolution.
805 * Most Logitech webcams have res = 384.
 806 * Probably there is some logitech magic behind this number --fishor
807 */
808 if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
809 snd_printk(KERN_INFO
810 "set resolution quirk: cval->res = 384\n");
811 cval->res = 384;
812 }
813 break;
814
815 }
816}
817
768/* 818/*
769 * retrieve the minimum and maximum values for the specified control 819 * retrieve the minimum and maximum values for the specified control
770 */ 820 */
771static int get_min_max(struct usb_mixer_elem_info *cval, int default_min) 821static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
822 int default_min, struct snd_kcontrol *kctl)
772{ 823{
773 /* for failsafe */ 824 /* for failsafe */
774 cval->min = default_min; 825 cval->min = default_min;
@@ -844,6 +895,9 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
844 cval->initialized = 1; 895 cval->initialized = 1;
845 } 896 }
846 897
898 if (kctl)
899 volume_control_quirks(cval, kctl);
900
847 /* USB descriptions contain the dB scale in 1/256 dB unit 901 /* USB descriptions contain the dB scale in 1/256 dB unit
848 * while ALSA TLV contains in 1/100 dB unit 902 * while ALSA TLV contains in 1/100 dB unit
849 */ 903 */
@@ -864,6 +918,7 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
864 return 0; 918 return 0;
865} 919}
866 920
921#define get_min_max(cval, def) get_min_max_with_quirks(cval, def, NULL)
867 922
868/* get a feature/mixer unit info */ 923/* get a feature/mixer unit info */
869static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) 924static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
@@ -882,7 +937,7 @@ static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_
882 uinfo->value.integer.max = 1; 937 uinfo->value.integer.max = 1;
883 } else { 938 } else {
884 if (!cval->initialized) { 939 if (!cval->initialized) {
885 get_min_max(cval, 0); 940 get_min_max_with_quirks(cval, 0, kcontrol);
886 if (cval->initialized && cval->dBmin >= cval->dBmax) { 941 if (cval->initialized && cval->dBmin >= cval->dBmax) {
887 kcontrol->vd[0].access &= 942 kcontrol->vd[0].access &=
888 ~(SNDRV_CTL_ELEM_ACCESS_TLV_READ | 943 ~(SNDRV_CTL_ELEM_ACCESS_TLV_READ |
@@ -1045,9 +1100,6 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
1045 cval->ch_readonly = readonly_mask; 1100 cval->ch_readonly = readonly_mask;
1046 } 1101 }
1047 1102
1048 /* get min/max values */
1049 get_min_max(cval, 0);
1050
1051 /* if all channels in the mask are marked read-only, make the control 1103 /* if all channels in the mask are marked read-only, make the control
1052 * read-only. set_cur_mix_value() will check the mask again and won't 1104 * read-only. set_cur_mix_value() will check the mask again and won't
1053 * issue write commands to read-only channels. */ 1105 * issue write commands to read-only channels. */
@@ -1069,6 +1121,9 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
1069 len = snd_usb_copy_string_desc(state, nameid, 1121 len = snd_usb_copy_string_desc(state, nameid,
1070 kctl->id.name, sizeof(kctl->id.name)); 1122 kctl->id.name, sizeof(kctl->id.name));
1071 1123
1124 /* get min/max values */
1125 get_min_max_with_quirks(cval, 0, kctl);
1126
1072 switch (control) { 1127 switch (control) {
1073 case UAC_FU_MUTE: 1128 case UAC_FU_MUTE:
1074 case UAC_FU_VOLUME: 1129 case UAC_FU_VOLUME:
@@ -1118,51 +1173,6 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
1118 break; 1173 break;
1119 } 1174 }
1120 1175
1121 /* volume control quirks */
1122 switch (state->chip->usb_id) {
1123 case USB_ID(0x0471, 0x0101):
1124 case USB_ID(0x0471, 0x0104):
1125 case USB_ID(0x0471, 0x0105):
1126 case USB_ID(0x0672, 0x1041):
1127 /* quirk for UDA1321/N101.
1128 * note that detection between firmware 2.1.1.7 (N101)
1129 * and later 2.1.1.21 is not very clear from datasheets.
1130 * I hope that the min value is -15360 for newer firmware --jk
1131 */
1132 if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
1133 cval->min == -15616) {
1134 snd_printk(KERN_INFO
1135 "set volume quirk for UDA1321/N101 chip\n");
1136 cval->max = -256;
1137 }
1138 break;
1139
1140 case USB_ID(0x046d, 0x09a4):
1141 if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
1142 snd_printk(KERN_INFO
1143 "set volume quirk for QuickCam E3500\n");
1144 cval->min = 6080;
1145 cval->max = 8768;
1146 cval->res = 192;
1147 }
1148 break;
1149
1150 case USB_ID(0x046d, 0x0808):
1151 case USB_ID(0x046d, 0x0809):
1152 case USB_ID(0x046d, 0x0991):
1153 /* Most audio usb devices lie about volume resolution.
1154 * Most Logitech webcams have res = 384.
1155 * Proboly there is some logitech magic behind this number --fishor
1156 */
1157 if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
1158 snd_printk(KERN_INFO
1159 "set resolution quirk: cval->res = 384\n");
1160 cval->res = 384;
1161 }
1162 break;
1163
1164 }
1165
1166 range = (cval->max - cval->min) / cval->res; 1176 range = (cval->max - cval->min) / cval->res;
1167 /* Are there devices with volume range more than 255? I use a bit more 1177 /* Are there devices with volume range more than 255? I use a bit more
1168 * to be sure. 384 is a resolution magic number found on Logitech 1178 * to be sure. 384 is a resolution magic number found on Logitech
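
The mixer.c refactor above moves the device-specific volume fixes out of build_feature_ctl() and into volume_control_quirks(), which get_min_max_with_quirks() applies right after reading the raw range; the quirks are keyed off a packed vendor:product ID (USB_ID() puts the vendor ID in the upper 16 bits and the product ID in the lower 16). A small self-contained illustration of that switch-on-packed-ID pattern follows; the device IDs and the adjustments are invented for the example and do not correspond to real quirks:

#include <stdio.h>
#include <stdint.h>

#define USB_ID(vendor, product) (((uint32_t)(vendor) << 16) | (product))

struct volume_range {
        int min;
        int max;
        int res;
};

/* adjust a reported range for devices known to misreport it */
static void apply_quirks(uint32_t usb_id, struct volume_range *v)
{
        switch (usb_id) {
        case USB_ID(0x1234, 0x0001):    /* hypothetical webcam */
                v->res = 384;           /* real resolution is coarser than reported */
                break;
        case USB_ID(0x1234, 0x0002):    /* hypothetical headset */
                v->max = -256;          /* clamp an over-reported maximum */
                break;
        }
}

int main(void)
{
        struct volume_range v = { .min = -15616, .max = 0, .res = 1 };

        apply_quirks(USB_ID(0x1234, 0x0002), &v);
        printf("min=%d max=%d res=%d\n", v.min, v.max, v.res);
        return 0;
}
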
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index b61945f3af9e..32d2a21f2e3b 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1633,6 +1633,37 @@ YAMAHA_DEVICE(0x7010, "UB99"),
1633 } 1633 }
1634}, 1634},
1635{ 1635{
1636 /* Roland GAIA SH-01 */
1637 USB_DEVICE(0x0582, 0x0111),
1638 .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
1639 .vendor_name = "Roland",
1640 .product_name = "GAIA",
1641 .ifnum = QUIRK_ANY_INTERFACE,
1642 .type = QUIRK_COMPOSITE,
1643 .data = (const struct snd_usb_audio_quirk[]) {
1644 {
1645 .ifnum = 0,
1646 .type = QUIRK_AUDIO_STANDARD_INTERFACE
1647 },
1648 {
1649 .ifnum = 1,
1650 .type = QUIRK_AUDIO_STANDARD_INTERFACE
1651 },
1652 {
1653 .ifnum = 2,
1654 .type = QUIRK_MIDI_FIXED_ENDPOINT,
1655 .data = &(const struct snd_usb_midi_endpoint_info) {
1656 .out_cables = 0x0003,
1657 .in_cables = 0x0003
1658 }
1659 },
1660 {
1661 .ifnum = -1
1662 }
1663 }
1664 }
1665},
1666{
1636 USB_DEVICE(0x0582, 0x0113), 1667 USB_DEVICE(0x0582, 0x0113),
1637 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { 1668 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
1638 /* .vendor_name = "BOSS", */ 1669 /* .vendor_name = "BOSS", */
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 2e5bc7344026..a3ddac0deffd 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -137,12 +137,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
137 return -ENOMEM; 137 return -ENOMEM;
138 } 138 }
139 if (fp->nr_rates > 0) { 139 if (fp->nr_rates > 0) {
140 rate_table = kmalloc(sizeof(int) * fp->nr_rates, GFP_KERNEL); 140 rate_table = kmemdup(fp->rate_table,
141 sizeof(int) * fp->nr_rates, GFP_KERNEL);
141 if (!rate_table) { 142 if (!rate_table) {
142 kfree(fp); 143 kfree(fp);
143 return -ENOMEM; 144 return -ENOMEM;
144 } 145 }
145 memcpy(rate_table, fp->rate_table, sizeof(int) * fp->nr_rates);
146 fp->rate_table = rate_table; 146 fp->rate_table = rate_table;
147 } 147 }
148 148
@@ -224,10 +224,9 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
224 if (altsd->bNumEndpoints != 1) 224 if (altsd->bNumEndpoints != 1)
225 return -ENXIO; 225 return -ENXIO;
226 226
227 fp = kmalloc(sizeof(*fp), GFP_KERNEL); 227 fp = kmemdup(&ua_format, sizeof(*fp), GFP_KERNEL);
228 if (!fp) 228 if (!fp)
229 return -ENOMEM; 229 return -ENOMEM;
230 memcpy(fp, &ua_format, sizeof(*fp));
231 230
232 fp->iface = altsd->bInterfaceNumber; 231 fp->iface = altsd->bInterfaceNumber;
233 fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress; 232 fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress;
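
Both quirks.c hunks above swap a kmalloc()-plus-memcpy() pair for kmemdup(), which allocates and copies in one step and keeps the error handling to a single branch. There is no identical helper in the C standard library, but the same idea is easy to sketch in user space; the function below is a stand-in written for this illustration, not kernel code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* userspace stand-in for kmemdup(): duplicate len bytes, NULL on failure */
static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;
}

int main(void)
{
        int rates[] = { 44100, 48000, 96000 };
        int *copy = memdup(rates, sizeof(rates));

        if (!copy)
                return 1;
        printf("%d %d %d\n", copy[0], copy[1], copy[2]);
        free(copy);
        return 0;
}
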
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7d98676808d8..955930e0a5c3 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -463,7 +463,8 @@ static int run_perf_stat(int argc __used, const char **argv)
463 463
464 list_for_each_entry(counter, &evsel_list->entries, node) { 464 list_for_each_entry(counter, &evsel_list->entries, node) {
465 if (create_perf_stat_counter(counter, first) < 0) { 465 if (create_perf_stat_counter(counter, first) < 0) {
466 if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) { 466 if (errno == EINVAL || errno == ENOSYS ||
467 errno == ENOENT || errno == EOPNOTSUPP) {
467 if (verbose) 468 if (verbose)
468 ui__warning("%s event is not supported by the kernel.\n", 469 ui__warning("%s event is not supported by the kernel.\n",
469 event_name(counter)); 470 event_name(counter));
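
The builtin-stat.c change above adds EOPNOTSUPP to the errno values that perf stat reports as "event is not supported by the kernel" instead of treating them as hard failures. The classification happens straight after the perf_event_open() syscall; the sketch below shows that pattern in isolation, using the raw-syscall idiom from the perf_event_open(2) man page (the software task-clock event is an arbitrary choice for the example):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <linux/perf_event.h>

/* no glibc wrapper exists, so the raw syscall is used */
static int open_counter(struct perf_event_attr *attr)
{
        return (int) syscall(__NR_perf_event_open, attr, 0 /* this task */,
                             -1 /* any cpu */, -1 /* no group */, 0);
}

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK;

        fd = open_counter(&attr);
        if (fd < 0) {
                if (errno == EINVAL || errno == ENOSYS ||
                    errno == ENOENT || errno == EOPNOTSUPP)
                        fprintf(stderr, "event is not supported by the kernel\n");
                else
                        perror("perf_event_open");
                return 1;
        }

        close(fd);
        return 0;
}
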
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index e42626422587..d7915d4e77cb 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -34,6 +34,16 @@ int __perf_evsel__sample_size(u64 sample_type)
34 return size; 34 return size;
35} 35}
36 36
37static void hists__init(struct hists *hists)
38{
39 memset(hists, 0, sizeof(*hists));
40 hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
41 hists->entries_in = &hists->entries_in_array[0];
42 hists->entries_collapsed = RB_ROOT;
43 hists->entries = RB_ROOT;
44 pthread_mutex_init(&hists->lock, NULL);
45}
46
37void perf_evsel__init(struct perf_evsel *evsel, 47void perf_evsel__init(struct perf_evsel *evsel,
38 struct perf_event_attr *attr, int idx) 48 struct perf_event_attr *attr, int idx)
39{ 49{
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index bcd05d05b4f0..33c17a2b2a81 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -388,7 +388,7 @@ static int write_event_desc(int fd, struct perf_header *h __used,
388 /* 388 /*
389 * write event string as passed on cmdline 389 * write event string as passed on cmdline
390 */ 390 */
391 ret = do_write_string(fd, attr->name); 391 ret = do_write_string(fd, event_name(attr));
392 if (ret < 0) 392 if (ret < 0)
393 return ret; 393 return ret;
394 /* 394 /*
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index a36a3fa81ffb..abef2703cd24 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1211,13 +1211,3 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
1211 1211
1212 return ret; 1212 return ret;
1213} 1213}
1214
1215void hists__init(struct hists *hists)
1216{
1217 memset(hists, 0, sizeof(*hists));
1218 hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
1219 hists->entries_in = &hists->entries_in_array[0];
1220 hists->entries_collapsed = RB_ROOT;
1221 hists->entries = RB_ROOT;
1222 pthread_mutex_init(&hists->lock, NULL);
1223}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index c86c1d27bd1e..89289c8e935e 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -63,8 +63,6 @@ struct hists {
63 struct callchain_cursor callchain_cursor; 63 struct callchain_cursor callchain_cursor;
64}; 64};
65 65
66void hists__init(struct hists *hists);
67
68struct hist_entry *__hists__add_entry(struct hists *self, 66struct hist_entry *__hists__add_entry(struct hists *self,
69 struct addr_location *al, 67 struct addr_location *al,
70 struct symbol *parent, u64 period); 68 struct symbol *parent, u64 period);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 85c1e6b76f0a..0f4555ce9063 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1333,6 +1333,10 @@ int perf_session__cpu_bitmap(struct perf_session *session,
1333 } 1333 }
1334 1334
1335 map = cpu_map__new(cpu_list); 1335 map = cpu_map__new(cpu_list);
1336 if (map == NULL) {
1337 pr_err("Invalid cpu_list\n");
1338 return -1;
1339 }
1336 1340
1337 for (i = 0; i < map->nr; i++) { 1341 for (i = 0; i < map->nr; i++) {
1338 int cpu = map->map[i]; 1342 int cpu = map->map[i];
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 0a7ed5b5e281..6c164dc9ee95 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -1537,6 +1537,8 @@ process_flags(struct event *event, struct print_arg *arg, char **tok)
1537 field = malloc_or_die(sizeof(*field)); 1537 field = malloc_or_die(sizeof(*field));
1538 1538
1539 type = process_arg(event, field, &token); 1539 type = process_arg(event, field, &token);
1540 while (type == EVENT_OP)
1541 type = process_op(event, field, &token);
1540 if (test_type_token(type, token, EVENT_DELIM, ",")) 1542 if (test_type_token(type, token, EVENT_DELIM, ","))
1541 goto out_free; 1543 goto out_free;
1542 1544
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 8d02ccb10c59..8b4c2535b266 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -42,6 +42,7 @@ $default{"BISECT_MANUAL"} = 0;
42$default{"BISECT_SKIP"} = 1; 42$default{"BISECT_SKIP"} = 1;
43$default{"SUCCESS_LINE"} = "login:"; 43$default{"SUCCESS_LINE"} = "login:";
44$default{"DETECT_TRIPLE_FAULT"} = 1; 44$default{"DETECT_TRIPLE_FAULT"} = 1;
45$default{"NO_INSTALL"} = 0;
45$default{"BOOTED_TIMEOUT"} = 1; 46$default{"BOOTED_TIMEOUT"} = 1;
46$default{"DIE_ON_FAILURE"} = 1; 47$default{"DIE_ON_FAILURE"} = 1;
47$default{"SSH_EXEC"} = "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND"; 48$default{"SSH_EXEC"} = "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND";
@@ -84,6 +85,7 @@ my $grub_number;
84my $target; 85my $target;
85my $make; 86my $make;
86my $post_install; 87my $post_install;
88my $no_install;
87my $noclean; 89my $noclean;
88my $minconfig; 90my $minconfig;
89my $start_minconfig; 91my $start_minconfig;
@@ -115,6 +117,7 @@ my $timeout;
115my $booted_timeout; 117my $booted_timeout;
116my $detect_triplefault; 118my $detect_triplefault;
117my $console; 119my $console;
120my $reboot_success_line;
118my $success_line; 121my $success_line;
119my $stop_after_success; 122my $stop_after_success;
120my $stop_after_failure; 123my $stop_after_failure;
@@ -130,6 +133,12 @@ my %config_help;
130my %variable; 133my %variable;
131my %force_config; 134my %force_config;
132 135
136# do not force reboots on config problems
137my $no_reboot = 1;
138
139# default variables that can be used
140chomp ($variable{"PWD"} = `pwd`);
141
133$config_help{"MACHINE"} = << "EOF" 142$config_help{"MACHINE"} = << "EOF"
134 The machine hostname that you will test. 143 The machine hostname that you will test.
135EOF 144EOF
@@ -241,6 +250,7 @@ sub read_yn {
241 250
242sub get_ktest_config { 251sub get_ktest_config {
243 my ($config) = @_; 252 my ($config) = @_;
253 my $ans;
244 254
245 return if (defined($opt{$config})); 255 return if (defined($opt{$config}));
246 256
@@ -254,16 +264,17 @@ sub get_ktest_config {
254 if (defined($default{$config})) { 264 if (defined($default{$config})) {
255 print "\[$default{$config}\] "; 265 print "\[$default{$config}\] ";
256 } 266 }
257 $entered_configs{$config} = <STDIN>; 267 $ans = <STDIN>;
258 $entered_configs{$config} =~ s/^\s*(.*\S)\s*$/$1/; 268 $ans =~ s/^\s*(.*\S)\s*$/$1/;
259 if ($entered_configs{$config} =~ /^\s*$/) { 269 if ($ans =~ /^\s*$/) {
260 if ($default{$config}) { 270 if ($default{$config}) {
261 $entered_configs{$config} = $default{$config}; 271 $ans = $default{$config};
262 } else { 272 } else {
263 print "Your answer can not be blank\n"; 273 print "Your answer can not be blank\n";
264 next; 274 next;
265 } 275 }
266 } 276 }
277 $entered_configs{$config} = process_variables($ans);
267 last; 278 last;
268 } 279 }
269} 280}
@@ -298,7 +309,7 @@ sub get_ktest_configs {
298} 309}
299 310
300sub process_variables { 311sub process_variables {
301 my ($value) = @_; 312 my ($value, $remove_undef) = @_;
302 my $retval = ""; 313 my $retval = "";
303 314
304 # We want to check for '\', and it is just easier 315 # We want to check for '\', and it is just easier
@@ -316,6 +327,10 @@ sub process_variables {
316 $retval = "$retval$begin"; 327 $retval = "$retval$begin";
317 if (defined($variable{$var})) { 328 if (defined($variable{$var})) {
318 $retval = "$retval$variable{$var}"; 329 $retval = "$retval$variable{$var}";
330 } elsif (defined($remove_undef) && $remove_undef) {
331 # for if statements, any variable that is not defined,
 332			# we simply convert it to 0
333 $retval = "${retval}0";
319 } else { 334 } else {
320 # put back the origin piece. 335 # put back the origin piece.
321 $retval = "$retval\$\{$var\}"; 336 $retval = "$retval\$\{$var\}";
@@ -331,10 +346,17 @@ sub process_variables {
331} 346}
332 347
333sub set_value { 348sub set_value {
334 my ($lvalue, $rvalue) = @_; 349 my ($lvalue, $rvalue, $override, $overrides, $name) = @_;
335 350
336 if (defined($opt{$lvalue})) { 351 if (defined($opt{$lvalue})) {
337 die "Error: Option $lvalue defined more than once!\n"; 352 if (!$override || defined(${$overrides}{$lvalue})) {
353 my $extra = "";
354 if ($override) {
355 $extra = "In the same override section!\n";
356 }
357 die "$name: $.: Option $lvalue defined more than once!\n$extra";
358 }
359 ${$overrides}{$lvalue} = $rvalue;
338 } 360 }
339 if ($rvalue =~ /^\s*$/) { 361 if ($rvalue =~ /^\s*$/) {
340 delete $opt{$lvalue}; 362 delete $opt{$lvalue};
@@ -355,86 +377,274 @@ sub set_variable {
355 } 377 }
356} 378}
357 379
358sub read_config { 380sub process_compare {
359 my ($config) = @_; 381 my ($lval, $cmp, $rval) = @_;
382
383 # remove whitespace
384
385 $lval =~ s/^\s*//;
386 $lval =~ s/\s*$//;
387
388 $rval =~ s/^\s*//;
389 $rval =~ s/\s*$//;
390
391 if ($cmp eq "==") {
392 return $lval eq $rval;
393 } elsif ($cmp eq "!=") {
394 return $lval ne $rval;
395 }
396
397 my $statement = "$lval $cmp $rval";
398 my $ret = eval $statement;
399
400 # $@ stores error of eval
401 if ($@) {
402 return -1;
403 }
404
405 return $ret;
406}
407
408sub value_defined {
409 my ($val) = @_;
410
411 return defined($variable{$2}) ||
412 defined($opt{$2});
413}
414
415my $d = 0;
416sub process_expression {
417 my ($name, $val) = @_;
418
419 my $c = $d++;
420
421 while ($val =~ s/\(([^\(]*?)\)/\&\&\&\&VAL\&\&\&\&/) {
422 my $express = $1;
423
424 if (process_expression($name, $express)) {
425 $val =~ s/\&\&\&\&VAL\&\&\&\&/ 1 /;
426 } else {
427 $val =~ s/\&\&\&\&VAL\&\&\&\&/ 0 /;
428 }
429 }
430
431 $d--;
432 my $OR = "\\|\\|";
433 my $AND = "\\&\\&";
434
435 while ($val =~ s/^(.*?)($OR|$AND)//) {
436 my $express = $1;
437 my $op = $2;
438
439 if (process_expression($name, $express)) {
440 if ($op eq "||") {
441 return 1;
442 }
443 } else {
444 if ($op eq "&&") {
445 return 0;
446 }
447 }
448 }
449
450 if ($val =~ /(.*)(==|\!=|>=|<=|>|<)(.*)/) {
451 my $ret = process_compare($1, $2, $3);
452 if ($ret < 0) {
453 die "$name: $.: Unable to process comparison\n";
454 }
455 return $ret;
456 }
457
458 if ($val =~ /^\s*(NOT\s*)?DEFINED\s+(\S+)\s*$/) {
459 if (defined $1) {
460 return !value_defined($2);
461 } else {
462 return value_defined($2);
463 }
464 }
465
466 if ($val =~ /^\s*0\s*$/) {
467 return 0;
468 } elsif ($val =~ /^\s*\d+\s*$/) {
469 return 1;
470 }
471
472 die ("$name: $.: Undefined content $val in if statement\n");
473}
474
475sub process_if {
476 my ($name, $value) = @_;
360 477
361 open(IN, $config) || die "can't read file $config"; 478 # Convert variables and replace undefined ones with 0
479 my $val = process_variables($value, 1);
480 my $ret = process_expression $name, $val;
481
482 return $ret;
483}
484
485sub __read_config {
486 my ($config, $current_test_num) = @_;
487
488 my $in;
489 open($in, $config) || die "can't read file $config";
362 490
363 my $name = $config; 491 my $name = $config;
364 $name =~ s,.*/(.*),$1,; 492 $name =~ s,.*/(.*),$1,;
365 493
366 my $test_num = 0; 494 my $test_num = $$current_test_num;
367 my $default = 1; 495 my $default = 1;
368 my $repeat = 1; 496 my $repeat = 1;
369 my $num_tests_set = 0; 497 my $num_tests_set = 0;
370 my $skip = 0; 498 my $skip = 0;
371 my $rest; 499 my $rest;
500 my $line;
372 my $test_case = 0; 501 my $test_case = 0;
502 my $if = 0;
503 my $if_set = 0;
504 my $override = 0;
373 505
374 while (<IN>) { 506 my %overrides;
507
508 while (<$in>) {
375 509
376 # ignore blank lines and comments 510 # ignore blank lines and comments
377 next if (/^\s*$/ || /\s*\#/); 511 next if (/^\s*$/ || /\s*\#/);
378 512
379 if (/^\s*TEST_START(.*)/) { 513 if (/^\s*(TEST_START|DEFAULTS)\b(.*)/) {
380 514
381 $rest = $1; 515 my $type = $1;
516 $rest = $2;
517 $line = $2;
382 518
383 if ($num_tests_set) { 519 my $old_test_num;
384 die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n"; 520 my $old_repeat;
385 } 521 $override = 0;
522
523 if ($type eq "TEST_START") {
386 524
387 my $old_test_num = $test_num; 525 if ($num_tests_set) {
388 my $old_repeat = $repeat; 526 die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
527 }
389 528
390 $test_num += $repeat; 529 $old_test_num = $test_num;
391 $default = 0; 530 $old_repeat = $repeat;
392 $repeat = 1;
393 531
394 if ($rest =~ /\s+SKIP(.*)/) { 532 $test_num += $repeat;
395 $rest = $1; 533 $default = 0;
534 $repeat = 1;
535 } else {
536 $default = 1;
537 }
538
539 # If SKIP is anywhere in the line, the command will be skipped
540 if ($rest =~ s/\s+SKIP\b//) {
396 $skip = 1; 541 $skip = 1;
397 } else { 542 } else {
398 $test_case = 1; 543 $test_case = 1;
399 $skip = 0; 544 $skip = 0;
400 } 545 }
401 546
402 if ($rest =~ /\s+ITERATE\s+(\d+)(.*)$/) { 547 if ($rest =~ s/\sELSE\b//) {
403 $repeat = $1; 548 if (!$if) {
404 $rest = $2; 549 die "$name: $.: ELSE found with out matching IF section\n$_";
405 $repeat_tests{"$test_num"} = $repeat; 550 }
551 $if = 0;
552
553 if ($if_set) {
554 $skip = 1;
555 } else {
556 $skip = 0;
557 }
406 } 558 }
407 559
408 if ($rest =~ /\s+SKIP(.*)/) { 560 if ($rest =~ s/\sIF\s+(.*)//) {
409 $rest = $1; 561 if (process_if($name, $1)) {
410 $skip = 1; 562 $if_set = 1;
563 } else {
564 $skip = 1;
565 }
566 $if = 1;
567 } else {
568 $if = 0;
569 $if_set = 0;
411 } 570 }
412 571
413 if ($rest !~ /^\s*$/) { 572 if (!$skip) {
414 die "$name: $.: Gargbage found after TEST_START\n$_"; 573 if ($type eq "TEST_START") {
574 if ($rest =~ s/\s+ITERATE\s+(\d+)//) {
575 $repeat = $1;
576 $repeat_tests{"$test_num"} = $repeat;
577 }
578 } elsif ($rest =~ s/\sOVERRIDE\b//) {
579 # DEFAULT only
580 $override = 1;
581 # Clear previous overrides
582 %overrides = ();
583 }
584 }
585
586 if (!$skip && $rest !~ /^\s*$/) {
587 die "$name: $.: Gargbage found after $type\n$_";
415 } 588 }
416 589
417 if ($skip) { 590 if ($skip && $type eq "TEST_START") {
418 $test_num = $old_test_num; 591 $test_num = $old_test_num;
419 $repeat = $old_repeat; 592 $repeat = $old_repeat;
420 } 593 }
421 594
422 } elsif (/^\s*DEFAULTS(.*)$/) { 595 } elsif (/^\s*ELSE\b(.*)$/) {
423 $default = 1; 596 if (!$if) {
424 597 die "$name: $.: ELSE found with out matching IF section\n$_";
598 }
425 $rest = $1; 599 $rest = $1;
426 600 if ($if_set) {
427 if ($rest =~ /\s+SKIP(.*)/) {
428 $rest = $1;
429 $skip = 1; 601 $skip = 1;
602 $rest = "";
430 } else { 603 } else {
431 $skip = 0; 604 $skip = 0;
605
606 if ($rest =~ /\sIF\s+(.*)/) {
607 # May be a ELSE IF section.
608 if (!process_if($name, $1)) {
609 $skip = 1;
610 }
611 $rest = "";
612 } else {
613 $if = 0;
614 }
432 } 615 }
433 616
434 if ($rest !~ /^\s*$/) { 617 if ($rest !~ /^\s*$/) {
435 die "$name: $.: Gargbage found after DEFAULTS\n$_"; 618 die "$name: $.: Gargbage found after DEFAULTS\n$_";
436 } 619 }
437 620
621 } elsif (/^\s*INCLUDE\s+(\S+)/) {
622
623 next if ($skip);
624
625 if (!$default) {
626 die "$name: $.: INCLUDE can only be done in default sections\n$_";
627 }
628
629 my $file = process_variables($1);
630
631 if ($file !~ m,^/,) {
632 # check the path of the config file first
633 if ($config =~ m,(.*)/,) {
634 if (-f "$1/$file") {
635 $file = "$1/$file";
636 }
637 }
638 }
639
640 if ( ! -r $file ) {
641 die "$name: $.: Can't read file $file\n$_";
642 }
643
644 if (__read_config($file, \$test_num)) {
645 $test_case = 1;
646 }
647
438 } elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) { 648 } elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) {
439 649
440 next if ($skip); 650 next if ($skip);
@@ -460,10 +670,10 @@ sub read_config {
460 } 670 }
461 671
462 if ($default || $lvalue =~ /\[\d+\]$/) { 672 if ($default || $lvalue =~ /\[\d+\]$/) {
463 set_value($lvalue, $rvalue); 673 set_value($lvalue, $rvalue, $override, \%overrides, $name);
464 } else { 674 } else {
465 my $val = "$lvalue\[$test_num\]"; 675 my $val = "$lvalue\[$test_num\]";
466 set_value($val, $rvalue); 676 set_value($val, $rvalue, $override, \%overrides, $name);
467 677
468 if ($repeat > 1) { 678 if ($repeat > 1) {
469 $repeats{$val} = $repeat; 679 $repeats{$val} = $repeat;
@@ -490,13 +700,26 @@ sub read_config {
490 } 700 }
491 } 701 }
492 702
493 close(IN);
494
495 if ($test_num) { 703 if ($test_num) {
496 $test_num += $repeat - 1; 704 $test_num += $repeat - 1;
497 $opt{"NUM_TESTS"} = $test_num; 705 $opt{"NUM_TESTS"} = $test_num;
498 } 706 }
499 707
708 close($in);
709
710 $$current_test_num = $test_num;
711
712 return $test_case;
713}
714
715sub read_config {
716 my ($config) = @_;
717
718 my $test_case;
719 my $test_num = 0;
720
721 $test_case = __read_config $config, \$test_num;
722
500 # make sure we have all mandatory configs 723 # make sure we have all mandatory configs
501 get_ktest_configs; 724 get_ktest_configs;
502 725
@@ -524,6 +747,18 @@ sub __eval_option {
524 # Add space to evaluate the character before $ 747 # Add space to evaluate the character before $
525 $option = " $option"; 748 $option = " $option";
526 my $retval = ""; 749 my $retval = "";
750 my $repeated = 0;
751 my $parent = 0;
752
753 foreach my $test (keys %repeat_tests) {
754 if ($i >= $test &&
755 $i < $test + $repeat_tests{$test}) {
756
757 $repeated = 1;
758 $parent = $test;
759 last;
760 }
761 }
527 762
528 while ($option =~ /(.*?[^\\])\$\{(.*?)\}(.*)/) { 763 while ($option =~ /(.*?[^\\])\$\{(.*?)\}(.*)/) {
529 my $start = $1; 764 my $start = $1;
@@ -537,10 +772,14 @@ sub __eval_option {
537 # otherwise see if the default OPT (without [$i]) exists. 772 # otherwise see if the default OPT (without [$i]) exists.
538 773
539 my $o = "$var\[$i\]"; 774 my $o = "$var\[$i\]";
775 my $parento = "$var\[$parent\]";
540 776
541 if (defined($opt{$o})) { 777 if (defined($opt{$o})) {
542 $o = $opt{$o}; 778 $o = $opt{$o};
543 $retval = "$retval$o"; 779 $retval = "$retval$o";
780 } elsif ($repeated && defined($opt{$parento})) {
781 $o = $opt{$parento};
782 $retval = "$retval$o";
544 } elsif (defined($opt{$var})) { 783 } elsif (defined($opt{$var})) {
545 $o = $opt{$var}; 784 $o = $opt{$var};
546 $retval = "$retval$o"; 785 $retval = "$retval$o";
@@ -603,8 +842,20 @@ sub doprint {
603} 842}
604 843
605sub run_command; 844sub run_command;
845sub start_monitor;
846sub end_monitor;
847sub wait_for_monitor;
606 848
607sub reboot { 849sub reboot {
850 my ($time) = @_;
851
852 if (defined($time)) {
853 start_monitor;
854 # flush out current monitor
855 # May contain the reboot success line
856 wait_for_monitor 1;
857 }
858
608 # try to reboot normally 859 # try to reboot normally
609 if (run_command $reboot) { 860 if (run_command $reboot) {
610 if (defined($powercycle_after_reboot)) { 861 if (defined($powercycle_after_reboot)) {
@@ -615,12 +866,17 @@ sub reboot {
615 # nope? power cycle it. 866 # nope? power cycle it.
616 run_command "$power_cycle"; 867 run_command "$power_cycle";
617 } 868 }
869
870 if (defined($time)) {
871 wait_for_monitor($time, $reboot_success_line);
872 end_monitor;
873 }
618} 874}
619 875
620sub do_not_reboot { 876sub do_not_reboot {
621 my $i = $iteration; 877 my $i = $iteration;
622 878
623 return $test_type eq "build" || 879 return $test_type eq "build" || $no_reboot ||
624 ($test_type eq "patchcheck" && $opt{"PATCHCHECK_TYPE[$i]"} eq "build") || 880 ($test_type eq "patchcheck" && $opt{"PATCHCHECK_TYPE[$i]"} eq "build") ||
625 ($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build"); 881 ($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build");
626} 882}
@@ -693,16 +949,29 @@ sub end_monitor {
693} 949}
694 950
695sub wait_for_monitor { 951sub wait_for_monitor {
696 my ($time) = @_; 952 my ($time, $stop) = @_;
953 my $full_line = "";
697 my $line; 954 my $line;
955 my $booted = 0;
698 956
699 doprint "** Wait for monitor to settle down **\n"; 957 doprint "** Wait for monitor to settle down **\n";
700 958
701 # read the monitor and wait for the system to calm down 959 # read the monitor and wait for the system to calm down
702 do { 960 while (!$booted) {
703 $line = wait_for_input($monitor_fp, $time); 961 $line = wait_for_input($monitor_fp, $time);
704 print "$line" if (defined($line)); 962 last if (!defined($line));
705 } while (defined($line)); 963 print "$line";
964 $full_line .= $line;
965
966 if (defined($stop) && $full_line =~ /$stop/) {
967 doprint "wait for monitor detected $stop\n";
968 $booted = 1;
969 }
970
971 if ($line =~ /\n/) {
972 $full_line = "";
973 }
974 }
706 print "** Monitor flushed **\n"; 975 print "** Monitor flushed **\n";
707} 976}
708 977
@@ -719,10 +988,7 @@ sub fail {
719 # no need to reboot for just building. 988 # no need to reboot for just building.
720 if (!do_not_reboot) { 989 if (!do_not_reboot) {
721 doprint "REBOOTING\n"; 990 doprint "REBOOTING\n";
722 reboot; 991 reboot $sleep_time;
723 start_monitor;
724 wait_for_monitor $sleep_time;
725 end_monitor;
726 } 992 }
727 993
728 my $name = ""; 994 my $name = "";
@@ -854,9 +1120,12 @@ sub get_grub_index {
854 open(IN, "$ssh_grub |") 1120 open(IN, "$ssh_grub |")
855 or die "unable to get menu.lst"; 1121 or die "unable to get menu.lst";
856 1122
1123 my $found = 0;
1124
857 while (<IN>) { 1125 while (<IN>) {
858 if (/^\s*title\s+$grub_menu\s*$/) { 1126 if (/^\s*title\s+$grub_menu\s*$/) {
859 $grub_number++; 1127 $grub_number++;
1128 $found = 1;
860 last; 1129 last;
861 } elsif (/^\s*title\s/) { 1130 } elsif (/^\s*title\s/) {
862 $grub_number++; 1131 $grub_number++;
@@ -865,7 +1134,7 @@ sub get_grub_index {
865 close(IN); 1134 close(IN);
866 1135
867 die "Could not find '$grub_menu' in /boot/grub/menu on $machine" 1136 die "Could not find '$grub_menu' in /boot/grub/menu on $machine"
868 if ($grub_number < 0); 1137 if (!$found);
869 doprint "$grub_number\n"; 1138 doprint "$grub_number\n";
870} 1139}
871 1140
@@ -902,7 +1171,8 @@ sub wait_for_input
902 1171
903sub reboot_to { 1172sub reboot_to {
904 if ($reboot_type eq "grub") { 1173 if ($reboot_type eq "grub") {
905 run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch && reboot)'"; 1174 run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
1175 reboot;
906 return; 1176 return;
907 } 1177 }
908 1178
@@ -1083,6 +1353,8 @@ sub do_post_install {
1083 1353
1084sub install { 1354sub install {
1085 1355
1356 return if ($no_install);
1357
1086 run_scp "$outputdir/$build_target", "$target_image" or 1358 run_scp "$outputdir/$build_target", "$target_image" or
1087 dodie "failed to copy image"; 1359 dodie "failed to copy image";
1088 1360
@@ -1140,6 +1412,11 @@ sub get_version {
1140} 1412}
1141 1413
1142sub start_monitor_and_boot { 1414sub start_monitor_and_boot {
1415 # Make sure the stable kernel has finished booting
1416 start_monitor;
1417 wait_for_monitor 5;
1418 end_monitor;
1419
1143 get_grub_index; 1420 get_grub_index;
1144 get_version; 1421 get_version;
1145 install; 1422 install;
@@ -1250,6 +1527,10 @@ sub build {
1250 1527
1251 unlink $buildlog; 1528 unlink $buildlog;
1252 1529
1530 # Failed builds should not reboot the target
1531 my $save_no_reboot = $no_reboot;
1532 $no_reboot = 1;
1533
1253 if (defined($pre_build)) { 1534 if (defined($pre_build)) {
1254 my $ret = run_command $pre_build; 1535 my $ret = run_command $pre_build;
1255 if (!$ret && defined($pre_build_die) && 1536 if (!$ret && defined($pre_build_die) &&
@@ -1272,15 +1553,15 @@ sub build {
1272 # allow for empty configs 1553 # allow for empty configs
1273 run_command "touch $output_config"; 1554 run_command "touch $output_config";
1274 1555
1275 run_command "mv $output_config $outputdir/config_temp" or 1556 if (!$noclean) {
1276 dodie "moving .config"; 1557 run_command "mv $output_config $outputdir/config_temp" or
1558 dodie "moving .config";
1277 1559
1278 if (!$noclean && !run_command "$make mrproper") { 1560 run_command "$make mrproper" or dodie "make mrproper";
1279 dodie "make mrproper";
1280 }
1281 1561
1282 run_command "mv $outputdir/config_temp $output_config" or 1562 run_command "mv $outputdir/config_temp $output_config" or
1283 dodie "moving config_temp"; 1563 dodie "moving config_temp";
1564 }
1284 1565
1285 } elsif (!$noclean) { 1566 } elsif (!$noclean) {
1286 unlink "$output_config"; 1567 unlink "$output_config";
@@ -1318,10 +1599,15 @@ sub build {
1318 1599
1319 if (!$build_ret) { 1600 if (!$build_ret) {
1320 # bisect may need this to pass 1601 # bisect may need this to pass
1321 return 0 if ($in_bisect); 1602 if ($in_bisect) {
1603 $no_reboot = $save_no_reboot;
1604 return 0;
1605 }
1322 fail "failed build" and return 0; 1606 fail "failed build" and return 0;
1323 } 1607 }
1324 1608
1609 $no_reboot = $save_no_reboot;
1610
1325 return 1; 1611 return 1;
1326} 1612}
1327 1613
@@ -1356,10 +1642,7 @@ sub success {
1356 1642
1357 if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) { 1643 if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) {
1358 doprint "Reboot and wait $sleep_time seconds\n"; 1644 doprint "Reboot and wait $sleep_time seconds\n";
1359 reboot; 1645 reboot $sleep_time;
1360 start_monitor;
1361 wait_for_monitor $sleep_time;
1362 end_monitor;
1363 } 1646 }
1364} 1647}
1365 1648
@@ -1500,10 +1783,7 @@ sub run_git_bisect {
1500 1783
1501sub bisect_reboot { 1784sub bisect_reboot {
1502 doprint "Reboot and sleep $bisect_sleep_time seconds\n"; 1785 doprint "Reboot and sleep $bisect_sleep_time seconds\n";
1503 reboot; 1786 reboot $bisect_sleep_time;
1504 start_monitor;
1505 wait_for_monitor $bisect_sleep_time;
1506 end_monitor;
1507} 1787}
1508 1788
1509# returns 1 on success, 0 on failure, -1 on skip 1789# returns 1 on success, 0 on failure, -1 on skip
@@ -2066,10 +2346,7 @@ sub config_bisect {
2066 2346
2067sub patchcheck_reboot { 2347sub patchcheck_reboot {
2068 doprint "Reboot and sleep $patchcheck_sleep_time seconds\n"; 2348 doprint "Reboot and sleep $patchcheck_sleep_time seconds\n";
2069 reboot; 2349 reboot $patchcheck_sleep_time;
2070 start_monitor;
2071 wait_for_monitor $patchcheck_sleep_time;
2072 end_monitor;
2073} 2350}
2074 2351
2075sub patchcheck { 2352sub patchcheck {
@@ -2178,12 +2455,31 @@ sub patchcheck {
2178} 2455}
2179 2456
2180my %depends; 2457my %depends;
2458my %depcount;
2181my $iflevel = 0; 2459my $iflevel = 0;
2182my @ifdeps; 2460my @ifdeps;
2183 2461
2184# prevent recursion 2462# prevent recursion
2185my %read_kconfigs; 2463my %read_kconfigs;
2186 2464
2465sub add_dep {
2466 # $config depends on $dep
2467 my ($config, $dep) = @_;
2468
2469 if (defined($depends{$config})) {
2470 $depends{$config} .= " " . $dep;
2471 } else {
2472 $depends{$config} = $dep;
2473 }
2474
2475 # record the number of configs depending on $dep
2476 if (defined $depcount{$dep}) {
2477 $depcount{$dep}++;
2478 } else {
2479 $depcount{$dep} = 1;
2480 }
2481}
2482
2187# taken from streamline_config.pl 2483# taken from streamline_config.pl
2188sub read_kconfig { 2484sub read_kconfig {
2189 my ($kconfig) = @_; 2485 my ($kconfig) = @_;
@@ -2230,30 +2526,19 @@ sub read_kconfig {
2230 $config = $2; 2526 $config = $2;
2231 2527
2232 for (my $i = 0; $i < $iflevel; $i++) { 2528 for (my $i = 0; $i < $iflevel; $i++) {
2233 if ($i) { 2529 add_dep $config, $ifdeps[$i];
2234 $depends{$config} .= " " . $ifdeps[$i];
2235 } else {
2236 $depends{$config} = $ifdeps[$i];
2237 }
2238 $state = "DEP";
2239 } 2530 }
2240 2531
2241 # collect the depends for the config 2532 # collect the depends for the config
2242 } elsif ($state eq "NEW" && /^\s*depends\s+on\s+(.*)$/) { 2533 } elsif ($state eq "NEW" && /^\s*depends\s+on\s+(.*)$/) {
2243 2534
2244 if (defined($depends{$1})) { 2535 add_dep $config, $1;
2245 $depends{$config} .= " " . $1;
2246 } else {
2247 $depends{$config} = $1;
2248 }
2249 2536
2250 # Get the configs that select this config 2537 # Get the configs that select this config
2251 } elsif ($state ne "NONE" && /^\s*select\s+(\S+)/) { 2538 } elsif ($state eq "NEW" && /^\s*select\s+(\S+)/) {
2252 if (defined($depends{$1})) { 2539
2253 $depends{$1} .= " " . $config; 2540 # selected by depends on config
2254 } else { 2541 add_dep $1, $config;
2255 $depends{$1} = $config;
2256 }
2257 2542
2258 # Check for if statements 2543 # Check for if statements
2259 } elsif (/^if\s+(.*\S)\s*$/) { 2544 } elsif (/^if\s+(.*\S)\s*$/) {
@@ -2365,11 +2650,18 @@ sub make_new_config {
2365 close OUT; 2650 close OUT;
2366} 2651}
2367 2652
2653sub chomp_config {
2654 my ($config) = @_;
2655
2656 $config =~ s/CONFIG_//;
2657
2658 return $config;
2659}
2660
2368sub get_depends { 2661sub get_depends {
2369 my ($dep) = @_; 2662 my ($dep) = @_;
2370 2663
2371 my $kconfig = $dep; 2664 my $kconfig = chomp_config $dep;
2372 $kconfig =~ s/CONFIG_//;
2373 2665
2374 $dep = $depends{"$kconfig"}; 2666 $dep = $depends{"$kconfig"};
2375 2667
@@ -2419,8 +2711,7 @@ sub test_this_config {
2419 return undef; 2711 return undef;
2420 } 2712 }
2421 2713
2422 my $kconfig = $config; 2714 my $kconfig = chomp_config $config;
2423 $kconfig =~ s/CONFIG_//;
2424 2715
2425 # Test dependencies first 2716 # Test dependencies first
2426 if (defined($depends{"$kconfig"})) { 2717 if (defined($depends{"$kconfig"})) {
@@ -2510,6 +2801,14 @@ sub make_min_config {
2510 2801
2511 my @config_keys = keys %min_configs; 2802 my @config_keys = keys %min_configs;
2512 2803
2804 # All configs need a depcount
2805 foreach my $config (@config_keys) {
2806 my $kconfig = chomp_config $config;
2807 if (!defined $depcount{$kconfig}) {
2808 $depcount{$kconfig} = 0;
2809 }
2810 }
2811
2513 # Remove anything that was set by the make allnoconfig 2812 # Remove anything that was set by the make allnoconfig
2514 # we shouldn't need them as they get set for us anyway. 2813 # we shouldn't need them as they get set for us anyway.
2515 foreach my $config (@config_keys) { 2814 foreach my $config (@config_keys) {
@@ -2548,8 +2847,13 @@ sub make_min_config {
2548 # Now disable each config one by one and do a make oldconfig 2847 # Now disable each config one by one and do a make oldconfig
2549 # till we find a config that changes our list. 2848 # till we find a config that changes our list.
2550 2849
2551 # Put configs that did not modify the config at the end.
2552 my @test_configs = keys %min_configs; 2850 my @test_configs = keys %min_configs;
2851
2852 # Sort keys by who is most dependent on
2853 @test_configs = sort { $depcount{chomp_config($b)} <=> $depcount{chomp_config($a)} }
2854 @test_configs ;
2855
2856 # Put configs that did not modify the config at the end.
2553 my $reset = 1; 2857 my $reset = 1;
2554 for (my $i = 0; $i < $#test_configs; $i++) { 2858 for (my $i = 0; $i < $#test_configs; $i++) {
2555 if (!defined($nochange_config{$test_configs[0]})) { 2859 if (!defined($nochange_config{$test_configs[0]})) {
@@ -2659,10 +2963,7 @@ sub make_min_config {
2659 } 2963 }
2660 2964
2661 doprint "Reboot and wait $sleep_time seconds\n"; 2965 doprint "Reboot and wait $sleep_time seconds\n";
2662 reboot; 2966 reboot $sleep_time;
2663 start_monitor;
2664 wait_for_monitor $sleep_time;
2665 end_monitor;
2666 } 2967 }
2667 2968
2668 success $i; 2969 success $i;
@@ -2783,6 +3084,9 @@ sub set_test_option {
2783# First we need to do is the builds 3084# First we need to do is the builds
2784for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { 3085for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2785 3086
3087 # Do not reboot on failing test options
3088 $no_reboot = 1;
3089
2786 $iteration = $i; 3090 $iteration = $i;
2787 3091
2788 my $makecmd = set_test_option("MAKE_CMD", $i); 3092 my $makecmd = set_test_option("MAKE_CMD", $i);
@@ -2811,6 +3115,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2811 $reboot_type = set_test_option("REBOOT_TYPE", $i); 3115 $reboot_type = set_test_option("REBOOT_TYPE", $i);
2812 $grub_menu = set_test_option("GRUB_MENU", $i); 3116 $grub_menu = set_test_option("GRUB_MENU", $i);
2813 $post_install = set_test_option("POST_INSTALL", $i); 3117 $post_install = set_test_option("POST_INSTALL", $i);
3118 $no_install = set_test_option("NO_INSTALL", $i);
2814 $reboot_script = set_test_option("REBOOT_SCRIPT", $i); 3119 $reboot_script = set_test_option("REBOOT_SCRIPT", $i);
2815 $reboot_on_error = set_test_option("REBOOT_ON_ERROR", $i); 3120 $reboot_on_error = set_test_option("REBOOT_ON_ERROR", $i);
2816 $poweroff_on_error = set_test_option("POWEROFF_ON_ERROR", $i); 3121 $poweroff_on_error = set_test_option("POWEROFF_ON_ERROR", $i);
@@ -2832,6 +3137,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2832 $console = set_test_option("CONSOLE", $i); 3137 $console = set_test_option("CONSOLE", $i);
2833 $detect_triplefault = set_test_option("DETECT_TRIPLE_FAULT", $i); 3138 $detect_triplefault = set_test_option("DETECT_TRIPLE_FAULT", $i);
2834 $success_line = set_test_option("SUCCESS_LINE", $i); 3139 $success_line = set_test_option("SUCCESS_LINE", $i);
3140 $reboot_success_line = set_test_option("REBOOT_SUCCESS_LINE", $i);
2835 $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i); 3141 $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i);
2836 $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i); 3142 $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i);
2837 $stop_test_after = set_test_option("STOP_TEST_AFTER", $i); 3143 $stop_test_after = set_test_option("STOP_TEST_AFTER", $i);
@@ -2850,9 +3156,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2850 3156
2851 chdir $builddir || die "can't change directory to $builddir"; 3157 chdir $builddir || die "can't change directory to $builddir";
2852 3158
2853 if (!-d $tmpdir) { 3159 foreach my $dir ($tmpdir, $outputdir) {
2854 mkpath($tmpdir) or 3160 if (!-d $dir) {
2855 die "can't create $tmpdir"; 3161 mkpath($dir) or
3162 die "can't create $dir";
3163 }
2856 } 3164 }
2857 3165
2858 $ENV{"SSH_USER"} = $ssh_user; 3166 $ENV{"SSH_USER"} = $ssh_user;
@@ -2889,8 +3197,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2889 $run_type = "ERROR"; 3197 $run_type = "ERROR";
2890 } 3198 }
2891 3199
3200 my $installme = "";
3201 $installme = " no_install" if ($no_install);
3202
2892 doprint "\n\n"; 3203 doprint "\n\n";
2893 doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type\n\n"; 3204 doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type$installme\n\n";
2894 3205
2895 unlink $dmesg; 3206 unlink $dmesg;
2896 unlink $buildlog; 3207 unlink $buildlog;
@@ -2911,6 +3222,9 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2911 die "failed to checkout $checkout"; 3222 die "failed to checkout $checkout";
2912 } 3223 }
2913 3224
3225 $no_reboot = 0;
3226
3227
2914 if ($test_type eq "bisect") { 3228 if ($test_type eq "bisect") {
2915 bisect $i; 3229 bisect $i;
2916 next; 3230 next;
@@ -2929,6 +3243,13 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2929 build $build_type or next; 3243 build $build_type or next;
2930 } 3244 }
2931 3245
3246 if ($test_type eq "install") {
3247 get_version;
3248 install;
3249 success $i;
3250 next;
3251 }
3252
2932 if ($test_type ne "build") { 3253 if ($test_type ne "build") {
2933 my $failed = 0; 3254 my $failed = 0;
2934 start_monitor_and_boot or $failed = 1; 3255 start_monitor_and_boot or $failed = 1;
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index b8bcd14b5a4d..dbedfa196727 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -72,6 +72,128 @@
72# the same option name under the same test or as default 72# the same option name under the same test or as default
73# ktest will fail to execute, and no tests will run. 73# ktest will fail to execute, and no tests will run.
74# 74#
75# DEFAULTS OVERRIDE
76#
77# Options defined in the DEFAULTS section can not be duplicated
78# even if they are defined in two different DEFAULT sections.
79# This is done to catch mistakes where an option is added but
80# the previous option was forgotten about and not commented.
81#
82# The OVERRIDE keyword can be added to a section to allow this
 83# section to override other DEFAULT sections' values that have
84# been defined previously. It will only override options that
85# have been defined before its use. Options defined later
 86# in a non-override section will still error. The same option
87# can not be defined in the same section even if that section
88# is marked OVERRIDE.
89#
90#
91#
92# Both TEST_START and DEFAULTS sections can also have the IF keyword
 93# The value after the IF must evaluate to 0 or a non-zero positive
94# integer, and can use the config variables (explained below).
95#
96# DEFAULTS IF ${IS_X86_32}
97#
98# The above will process the DEFAULTS section if the config
99# variable IS_X86_32 evaluates to a non-zero positive integer; 
100# otherwise, if it evaluates to zero, it will act the same 
101# as if the SKIP keyword was used.
102#
103# The ELSE keyword can be used directly after a section with
104# an IF statement. 
105#
106# TEST_START IF ${RUN_NET_TESTS}
107# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
108#
109# ELSE
110#
111# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-normal
112#
113#
114# The ELSE keyword can also contain an IF statement to allow multiple
115# if-then-else sections, but all the sections must be either 
116# DEFAULT or TEST_START; they cannot be a mixture. 
117#
118# TEST_START IF ${RUN_NET_TESTS}
119# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
120#
121# ELSE IF ${RUN_DISK_TESTS}
122# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-tests
123#
124# ELSE IF ${RUN_CPU_TESTS}
125# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-cpu
126#
127# ELSE
128# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
129#
130# The if statement may also have comparisons; for 
131# == and !=, strings may be used on both sides. 
132#
133# BOX_TYPE := x86_32
134#
135# DEFAULTS IF ${BOX_TYPE} == x86_32
136# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-32
137# ELSE
138# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-64
139#
140# The DEFINED keyword can be used in the IF statements too. 
141# It returns true if the given config variable or option has been defined
142# or false otherwise.
143#
144#
145# DEFAULTS IF DEFINED USE_CC
146# CC := ${USE_CC}
147# ELSE
148# CC := gcc
149#
150#
151# As well as NOT DEFINED.
152#
153# DEFAULTS IF NOT DEFINED MAKE_CMD
154# MAKE_CMD := make ARCH=x86
155#
156#
157# And/or ops (&&,||) may also be used to make complex conditionals.
158#
159# TEST_START IF (DEFINED ALL_TESTS || ${MYTEST} == boottest) && ${MACHINE} == gandalf
160#
161# Notice the use of parentheses. Without any parentheses the above would be 
162# processed the same as:
163#
164# TEST_START IF DEFINED ALL_TESTS || (${MYTEST} == boottest && ${MACHINE} == gandalf)
165#
166#
167#
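As a further sketch combining the operators above, a DEFAULTS section can mix a DEFINED check with a string comparison; BOX_TYPE, USE_CC and CONFIG_DIR are the same placeholder names used in the examples above, not real options:

    DEFAULTS IF DEFINED USE_CC && ${BOX_TYPE} == x86_32
    # Use the requested compiler, but only for 32-bit boxes.
    CC := ${USE_CC}
    BUILD_TYPE = useconfig:${CONFIG_DIR}/config-32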
168# INCLUDE file
169#
170# The INCLUDE keyword may be used in DEFAULT sections. This will
171# read another config file and process that file as well. The included
172# file can include other files, add new test cases or default
173# statements. Config variables will be passed to these files and changes
174# to config variables will be seen by top level config files. Including
175# a file is processed just as if the contents of the file were cut and pasted 
176# into the top level file, except that include files ending with a 
177# TEST_START section will have that section ended at the end of 
178# the include file. That is, an included file is treated as if it 
179# were followed by another DEFAULT keyword. 
180#
181# Unlike other files referenced in this config, the file path does not need
182# to be absolute. If the file does not start with '/', then the directory
183# that the current config file was located in is used. If no config by the
184# given name is found there, then the current directory is searched.
185#
186# INCLUDE myfile
187# DEFAULT
188#
189# is the same as:
190#
191# INCLUDE myfile
192#
193# Note, if the include file does not contain a full path, the file is 
194# searched for first in the location of the original include file, and then 
195# in the location that ktest.pl was executed in. 
196#
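For illustration, a brief sketch of the include behavior described above; the file name myboard.conf is hypothetical:

    # top level config
    INCLUDE myboard.conf
    # Even if myboard.conf ends inside a TEST_START section, that section
    # is closed here, and the line below is treated as a default again,
    # exactly as if a DEFAULT keyword followed the INCLUDE.
    MAKE_CMD := make ARCH=x86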
75 197
76#### Config variables #### 198#### Config variables ####
77# 199#
@@ -253,9 +375,10 @@
253 375
254# The default test type (default test) 376# The default test type (default test)
255# The test types may be: 377# The test types may be:
256# build - only build the kernel, do nothing else 378# build - only build the kernel, do nothing else
257# boot - build and boot the kernel 379# install - build and install, but do nothing else (does not reboot)
258# test - build, boot and if TEST is set, run the test script 380# boot - build, install, and boot the kernel
381# test - build, boot and if TEST is set, run the test script
259# (If TEST is not set, it defaults back to boot) 382# (If TEST is not set, it defaults back to boot)
260# bisect - Perform a bisect on the kernel (see BISECT_TYPE below) 383# bisect - Perform a bisect on the kernel (see BISECT_TYPE below)
261# patchcheck - Do a test on a series of commits in git (see PATCHCHECK below) 384# patchcheck - Do a test on a series of commits in git (see PATCHCHECK below)
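A minimal sketch of a test entry using the new install type added by this patch, assuming the TEST_TYPE option name used elsewhere in this file; the config path is hypothetical:

    TEST_START
    TEST_TYPE = install
    BUILD_TYPE = useconfig:${CONFIG_DIR}/config-install
    # builds and installs the kernel on the target, but does not reboot it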
@@ -293,6 +416,13 @@
293# or on some systems: 416# or on some systems:
294#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION 417#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
295 418
419# If for some reason you just want to boot the kernel and you do not 
420# want the test to install anything new, set this option to 1. For 
421# example, you may just want to boot test the same kernel over and over 
422# and do not want to go through the hassle of installing anything. 
423# (default 0) 
424#NO_INSTALL = 1
425
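For example, a sketch of a boot-only test that leaves the target's installed kernel untouched; BUILD_TYPE = nobuild is assumed here just to skip the build step:

    TEST_START
    TEST_TYPE = boot
    BUILD_TYPE = nobuild
    NO_INSTALL = 1
    # boots whatever kernel is already installed, installing nothing new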
296# If there is a script that you require to run before the build is done 426# If there is a script that you require to run before the build is done
297# you can specify it with PRE_BUILD. 427# you can specify it with PRE_BUILD.
298# 428#
@@ -415,6 +545,14 @@
415# (default "login:") 545# (default "login:")
416#SUCCESS_LINE = login: 546#SUCCESS_LINE = login:
417 547
548# To speed up between reboots, you can define a line that the 
549# default kernel produces which indicates that the default 
550# kernel has successfully booted and is ready to have 
551# a new test kernel passed to it. Otherwise ktest.pl will wait 
552# SLEEP_TIME before continuing. 
553# (default undefined)
554#REBOOT_SUCCESS_LINE = login:
555
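A short sketch pairing this with SLEEP_TIME; the console string is only an assumption about what the reliable kernel prints on your target:

    # The reliable kernel prints a login prompt when it is ready, so ktest
    # can hand it the next test kernel without waiting the full SLEEP_TIME.
    REBOOT_SUCCESS_LINE = login:
    SLEEP_TIME = 60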
418# In case the console constantly fills the screen, having 556# In case the console constantly fills the screen, having
419# a specified time to stop the test after success is recommended. 557# a specified time to stop the test after success is recommended.
420# (in seconds) 558# (in seconds)
@@ -480,6 +618,8 @@
480# another test. If a reboot to the reliable kernel happens, 618# another test. If a reboot to the reliable kernel happens,
481# we wait SLEEP_TIME for the console to stop producing output 619# we wait SLEEP_TIME for the console to stop producing output
482# before starting the next test. 620# before starting the next test.
621#
622# You can speed up reboot times even more by setting REBOOT_SUCCESS_LINE.
483# (default 60) 623# (default 60)
484#SLEEP_TIME = 60 624#SLEEP_TIME = 60
485 625