-rw-r--r--Documentation/ABI/testing/sysfs-platform-ideapad-laptop11
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-tuner.xml2
-rw-r--r--Documentation/block/00-INDEX10
-rw-r--r--Documentation/block/cfq-iosched.txt77
-rw-r--r--Documentation/block/queue-sysfs.txt64
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt8
-rw-r--r--Documentation/devicetree/bindings/regulator/tps6586x.txt12
-rw-r--r--Documentation/filesystems/vfat.txt11
-rw-r--r--Documentation/networking/netconsole.txt19
-rw-r--r--Documentation/pinctrl.txt6
-rw-r--r--Documentation/vm/hugetlbpage.txt10
-rw-r--r--Documentation/watchdog/src/watchdog-test.c2
-rw-r--r--MAINTAINERS13
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/Kconfig2
-rw-r--r--arch/alpha/include/asm/atomic.h4
-rw-r--r--arch/alpha/include/asm/fpu.h2
-rw-r--r--arch/alpha/include/asm/ptrace.h5
-rw-r--r--arch/alpha/include/asm/socket.h2
-rw-r--r--arch/alpha/include/asm/uaccess.h34
-rw-r--r--arch/alpha/include/asm/unistd.h4
-rw-r--r--arch/alpha/include/asm/word-at-a-time.h55
-rw-r--r--arch/alpha/kernel/alpha_ksyms.c3
-rw-r--r--arch/alpha/kernel/entry.S161
-rw-r--r--arch/alpha/kernel/osf_sys.c49
-rw-r--r--arch/alpha/kernel/process.c19
-rw-r--r--arch/alpha/kernel/systbls.S4
-rw-r--r--arch/alpha/lib/Makefile2
-rw-r--r--arch/alpha/lib/ev6-strncpy_from_user.S424
-rw-r--r--arch/alpha/lib/ev67-strlen_user.S107
-rw-r--r--arch/alpha/lib/strlen_user.S91
-rw-r--r--arch/alpha/lib/strncpy_from_user.S339
-rw-r--r--arch/alpha/mm/fault.c36
-rw-r--r--arch/alpha/oprofile/common.c1
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi5
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts4
-rw-r--r--arch/arm/boot/dts/kirkwood-iconnect.dts6
-rw-r--r--arch/arm/boot/dts/twl6030.dtsi3
-rw-r--r--arch/arm/configs/u8500_defconfig1
-rw-r--r--arch/arm/mach-dove/common.c3
-rw-r--r--arch/arm/mach-exynos/mach-origen.c7
-rw-r--r--arch/arm/mach-exynos/mach-smdkv310.c7
-rw-r--r--arch/arm/mach-imx/Makefile10
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c8
-rw-r--r--arch/arm/mach-imx/headsmp.S (renamed from arch/arm/mach-imx/head-v7.S)0
-rw-r--r--arch/arm/mach-imx/hotplug.c23
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c4
-rw-r--r--arch/arm/mach-kirkwood/Makefile.boot3
-rw-r--r--arch/arm/mach-kirkwood/common.c4
-rw-r--r--arch/arm/mach-mmp/sram.c2
-rw-r--r--arch/arm/mach-mv78xx0/addr-map.c2
-rw-r--r--arch/arm/mach-mv78xx0/common.c6
-rw-r--r--arch/arm/mach-omap2/Kconfig3
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c1
-rw-r--r--arch/arm/mach-omap2/common-board-devices.c11
-rw-r--r--arch/arm/mach-omap2/common-board-devices.h1
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c3
-rw-r--r--arch/arm/mach-omap2/mux.h1
-rw-r--r--arch/arm/mach-omap2/opp4xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/pm34xx.c19
-rw-r--r--arch/arm/mach-omap2/sleep44xx.S8
-rw-r--r--arch/arm/mach-omap2/twl-common.c1
-rw-r--r--arch/arm/mach-orion5x/common.c3
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/dma.h3
-rw-r--r--arch/arm/mach-ux500/Kconfig1
-rw-r--r--arch/arm/mach-ux500/board-mop500-msp.c10
-rw-r--r--arch/arm/mach-ux500/board-mop500.c4
-rw-r--r--arch/arm/plat-omap/dmtimer.c6
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h3
-rw-r--r--arch/arm/plat-omap/include/plat/multi.h9
-rw-r--r--arch/arm/plat-omap/include/plat/uncompress.h4
-rw-r--r--arch/arm/plat-orion/common.c8
-rw-r--r--arch/arm/plat-orion/include/plat/common.h6
-rw-r--r--arch/arm/plat-s3c24xx/dma.c2
-rw-r--r--arch/arm/plat-samsung/devs.c29
-rw-r--r--arch/arm/plat-samsung/include/plat/hdmi.h16
-rw-r--r--arch/arm/plat-samsung/pm.c2
-rw-r--r--arch/ia64/configs/generic_defconfig1
-rw-r--r--arch/ia64/configs/gensparse_defconfig1
-rw-r--r--arch/m68k/Kconfig1
-rw-r--r--arch/m68k/Kconfig.cpu5
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/alchemy/board-mtx1.c2
-rw-r--r--arch/mips/ath79/dev-usb.c2
-rw-r--r--arch/mips/ath79/gpio.c6
-rw-r--r--arch/mips/bcm63xx/dev-spi.c4
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c89
-rw-r--r--arch/mips/include/asm/mach-ath79/ar71xx_regs.h3
-rw-r--r--arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h13
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/irq.h10
-rw-r--r--arch/mips/include/asm/module.h1
-rw-r--r--arch/mips/include/asm/r4k-timer.h8
-rw-r--r--arch/mips/kernel/module.c43
-rw-r--r--arch/mips/kernel/smp.c4
-rw-r--r--arch/mips/kernel/sync-r4k.c26
-rw-r--r--arch/mips/mti-malta/malta-pci.c13
-rw-r--r--arch/mips/pci/pci-ar724x.c22
-rw-r--r--arch/parisc/include/asm/atomic.h4
-rw-r--r--arch/parisc/kernel/process.c2
-rw-r--r--arch/parisc/kernel/sys_parisc.c8
-rw-r--r--arch/powerpc/boot/dts/fsl/p4080si-post.dtsi7
-rw-r--r--arch/powerpc/configs/85xx/p1023rds_defconfig31
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig29
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig1
-rw-r--r--arch/powerpc/configs/g5_defconfig103
-rw-r--r--arch/powerpc/configs/mpc83xx_defconfig18
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig33
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig32
-rw-r--r--arch/powerpc/include/asm/cputable.h2
-rw-r--r--arch/powerpc/include/asm/kvm_host.h1
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h12
-rw-r--r--arch/powerpc/include/asm/mpic_msgr.h1
-rw-r--r--arch/powerpc/kernel/dma-iommu.c9
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c2
-rw-r--r--arch/powerpc/kernel/kgdb.c27
-rw-r--r--arch/powerpc/kernel/syscalls.c8
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c3
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S12
-rw-r--r--arch/powerpc/kvm/e500_tlb.c11
-rw-r--r--arch/powerpc/lib/copyuser_power7.S35
-rw-r--r--arch/powerpc/lib/memcpy_power7.S4
-rw-r--r--arch/powerpc/mm/mem.c1
-rw-r--r--arch/powerpc/perf/core-book3s.c2
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c13
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c3
-rw-r--r--arch/powerpc/xmon/xmon.c84
-rw-r--r--arch/s390/include/asm/elf.h3
-rw-r--r--arch/s390/include/asm/posix_types.h3
-rw-r--r--arch/s390/include/asm/smp.h1
-rw-r--r--arch/x86/Makefile4
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/include/asm/spinlock.h3
-rw-r--r--arch/x86/kernel/alternative.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c10
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c10
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c253
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h46
-rw-r--r--arch/x86/kernel/irq.c2
-rw-r--r--arch/x86/kernel/microcode_amd.c7
-rw-r--r--arch/x86/kvm/emulate.c30
-rw-r--r--arch/x86/kvm/mmu.c13
-rw-r--r--arch/x86/kvm/x86.c5
-rw-r--r--arch/x86/mm/hugetlbpage.c21
-rw-r--r--arch/x86/mm/pageattr.c10
-rw-r--r--arch/x86/platform/efi/efi.c30
-rw-r--r--arch/x86/realmode/rm/Makefile2
-rw-r--r--arch/x86/syscalls/syscall_64.tbl6
-rw-r--r--arch/x86/xen/enlighten.c118
-rw-r--r--arch/x86/xen/p2m.c95
-rw-r--r--arch/x86/xen/setup.c9
-rw-r--r--arch/x86/xen/suspend.c2
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--block/blk-lib.c41
-rw-r--r--block/blk-merge.c117
-rw-r--r--block/genhd.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c1
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.c8
-rw-r--r--drivers/ata/ahci.h1
-rw-r--r--drivers/ata/ata_piix.c8
-rw-r--r--drivers/ata/libahci.c3
-rw-r--r--drivers/ata/libata-acpi.c15
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/ata/pata_atiixp.c16
-rw-r--r--drivers/base/core.c4
-rw-r--r--drivers/block/cciss_scsi.c11
-rw-r--r--drivers/block/drbd/drbd_bitmap.c15
-rw-r--r--drivers/block/drbd/drbd_int.h1
-rw-r--r--drivers/block/drbd/drbd_main.c28
-rw-r--r--drivers/block/drbd/drbd_nl.c4
-rw-r--r--drivers/block/drbd/drbd_req.c36
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c2
-rw-r--r--drivers/char/agp/intel-agp.h1
-rw-r--r--drivers/char/agp/intel-gtt.c105
-rw-r--r--drivers/clocksource/cs5535-clockevt.c4
-rw-r--r--drivers/cpufreq/omap-cpufreq.c4
-rw-r--r--drivers/crypto/caam/jr.c10
-rw-r--r--drivers/crypto/hifn_795x.c4
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c3
-rw-r--r--drivers/gpu/drm/drm_modes.c3
-rw-r--r--drivers/gpu/drm/drm_proc.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c7
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c36
-rw-r--r--drivers/gpu/drm/i915/intel_display.c12
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c31
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c15
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c1
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c6
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c61
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c29
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c140
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c131
-rw-r--r--drivers/gpu/drm/radeon/r600d.h25
-rw-r--r--drivers/gpu/drm/radeon/radeon.h15
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c56
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c138
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c1
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6009
-rw-r--r--drivers/gpu/drm/udl/Kconfig1
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c61
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hwmon/asus_atk0110.c6
-rw-r--r--drivers/hwmon/coretemp.c2
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c1
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c28
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/ide/ide-pm.c4
-rw-r--r--drivers/iommu/amd_iommu_init.c2
-rw-r--r--drivers/iommu/intel_irq_remapping.c18
-rw-r--r--drivers/media/dvb/siano/smsusb.c2
-rw-r--r--drivers/media/radio/radio-shark.c151
-rw-r--r--drivers/media/radio/radio-shark2.c137
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c5
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c2
-rw-r--r--drivers/media/rc/Kconfig1
-rw-r--r--drivers/media/video/gspca/jl2005bcd.c2
-rw-r--r--drivers/media/video/gspca/spca506.c2
-rw-r--r--drivers/media/video/mem2mem_testdev.c2
-rw-r--r--drivers/media/video/mx1_camera.c4
-rw-r--r--drivers/media/video/mx2_camera.c47
-rw-r--r--drivers/media/video/mx3_camera.c22
-rw-r--r--drivers/media/video/soc_camera.c3
-rw-r--r--drivers/media/video/soc_mediabus.c6
-rw-r--r--drivers/media/video/uvc/uvc_queue.c1
-rw-r--r--drivers/media/video/v4l2-ioctl.c10
-rw-r--r--drivers/mfd/Kconfig3
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c84
-rw-r--r--drivers/net/bonding/bond_main.c12
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c4
-rw-r--r--drivers/net/can/softing/softing_fw.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c18
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c10
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c8
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c4
-rw-r--r--drivers/net/ethernet/renesas/Kconfig4
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c11
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h4
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c3
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c4
-rw-r--r--drivers/net/fddi/skfp/pmf.c2
-rw-r--r--drivers/net/irda/ks959-sir.c1
-rw-r--r--drivers/net/irda/ksdazzle-sir.c1
-rw-r--r--drivers/net/netconsole.c6
-rw-r--r--drivers/net/phy/mdio-mux.c2
-rw-r--r--drivers/net/team/team.c16
-rw-r--r--drivers/net/usb/qmi_wwan.c255
-rw-r--r--drivers/net/usb/sierra_net.c52
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/wan/dscc4.c5
-rw-r--r--drivers/net/wimax/i2400m/fw.c4
-rw-r--r--drivers/net/wireless/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c3
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c30
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/xen-netfront.c39
-rw-r--r--drivers/of/base.c27
-rw-r--r--drivers/pci/pci-acpi.c4
-rw-r--r--drivers/pci/pci-driver.c7
-rw-r--r--drivers/pinctrl/core.c13
-rw-r--r--drivers/pinctrl/pinctrl-imx51.c2
-rw-r--r--drivers/pinctrl/pinctrl-nomadik-db8500.c5
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c2
-rw-r--r--drivers/platform/x86/Kconfig6
-rw-r--r--drivers/platform/x86/apple-gmux.c426
-rw-r--r--drivers/platform/x86/asus-wmi.c21
-rw-r--r--drivers/platform/x86/asus-wmi.h1
-rw-r--r--drivers/platform/x86/classmate-laptop.c12
-rw-r--r--drivers/platform/x86/dell-laptop.c12
-rw-r--r--drivers/platform/x86/ideapad-laptop.c110
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c7
-rw-r--r--drivers/pwm/Kconfig31
-rw-r--r--drivers/pwm/core.c12
-rw-r--r--drivers/pwm/pwm-samsung.c1
-rw-r--r--drivers/pwm/pwm-tegra.c4
-rw-r--r--drivers/pwm/pwm-tiecap.c4
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c4
-rw-r--r--drivers/pwm/pwm-vt8500.c2
-rw-r--r--drivers/rapidio/devices/tsi721.c12
-rw-r--r--drivers/regulator/ab3100.c1
-rw-r--r--drivers/regulator/anatop-regulator.c5
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/gpio-regulator.c38
-rw-r--r--drivers/regulator/palmas-regulator.c23
-rw-r--r--drivers/regulator/tps6586x-regulator.c8
-rw-r--r--drivers/regulator/twl-regulator.c5
-rw-r--r--drivers/rtc/rtc-pcf2123.c2
-rw-r--r--drivers/rtc/rtc-rs5c348.c7
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/dasd_ioctl.c7
-rw-r--r--drivers/spi/spi-bcm63xx.c35
-rw-r--r--drivers/spi/spi-coldfire-qspi.c5
-rw-r--r--drivers/spi/spi-omap2-mcspi.c6
-rw-r--r--drivers/spi/spi-pl022.c1
-rw-r--r--drivers/spi/spi-s3c64xx.c12
-rw-r--r--drivers/staging/vt6656/main_usb.c2
-rw-r--r--drivers/staging/winbond/wbusb.c2
-rw-r--r--drivers/target/target_core_pscsi.c9
-rw-r--r--drivers/target/target_core_transport.c15
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h1
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c8
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c4
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/misc/emi62.c2
-rw-r--r--drivers/vfio/vfio.c19
-rw-r--r--drivers/vhost/tcm_vhost.c203
-rw-r--r--drivers/vhost/tcm_vhost.h12
-rw-r--r--drivers/video/console/fbcon.c9
-rw-r--r--drivers/watchdog/booke_wdt.c7
-rw-r--r--drivers/watchdog/da9052_wdt.c1
-rw-r--r--drivers/xen/platform-pci.c15
-rw-r--r--fs/bio.c11
-rw-r--r--fs/block_dev.c3
-rw-r--r--fs/btrfs/backref.c4
-rw-r--r--fs/btrfs/compression.c1
-rw-r--r--fs/btrfs/ctree.c9
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/delayed-inode.c12
-rw-r--r--fs/btrfs/delayed-ref.c163
-rw-r--r--fs/btrfs/delayed-ref.h4
-rw-r--r--fs/btrfs/disk-io.c53
-rw-r--r--fs/btrfs/disk-io.h2
-rw-r--r--fs/btrfs/extent-tree.c123
-rw-r--r--fs/btrfs/extent_io.c17
-rw-r--r--fs/btrfs/file-item.c4
-rw-r--r--fs/btrfs/inode.c326
-rw-r--r--fs/btrfs/ioctl.c2
-rw-r--r--fs/btrfs/locking.c2
-rw-r--r--fs/btrfs/qgroup.c12
-rw-r--r--fs/btrfs/root-tree.c4
-rw-r--r--fs/btrfs/super.c15
-rw-r--r--fs/btrfs/transaction.c3
-rw-r--r--fs/btrfs/volumes.c33
-rw-r--r--fs/btrfs/volumes.h2
-rw-r--r--fs/buffer.c66
-rw-r--r--fs/ceph/debugfs.c1
-rw-r--r--fs/ceph/inode.c15
-rw-r--r--fs/ceph/ioctl.c3
-rw-r--r--fs/compat.c10
-rw-r--r--fs/direct-io.c5
-rw-r--r--fs/eventpoll.c2
-rw-r--r--fs/jbd/journal.c5
-rw-r--r--fs/logfs/dev_bdev.c15
-rw-r--r--fs/logfs/inode.c18
-rw-r--r--fs/logfs/journal.c2
-rw-r--r--fs/logfs/readwrite.c1
-rw-r--r--fs/logfs/segment.c2
-rw-r--r--fs/namei.c2
-rw-r--r--fs/nfs/Makefile18
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/idmap.c62
-rw-r--r--fs/nfs/nfs3proc.c2
-rw-r--r--fs/nfs/nfs4_fs.h3
-rw-r--r--fs/nfs/nfs4client.c2
-rw-r--r--fs/nfs/nfs4proc.c76
-rw-r--r--fs/nfs/nfs4super.c15
-rw-r--r--fs/nfs/nfs4xdr.c26
-rw-r--r--fs/nfs/objlayout/objio_osd.c55
-rw-r--r--fs/nfs/pagelist.c2
-rw-r--r--fs/nfs/pnfs.c39
-rw-r--r--fs/nfs/pnfs.h2
-rw-r--r--fs/nfs/super.c39
-rw-r--r--fs/nfs/write.c15
-rw-r--r--fs/nfsd/nfs4callback.c4
-rw-r--r--fs/nfsd/state.h1
-rw-r--r--fs/quota/dquot.c2
-rw-r--r--fs/reiserfs/bitmap.c2
-rw-r--r--fs/reiserfs/inode.c2
-rw-r--r--fs/ubifs/debug.h2
-rw-r--r--fs/ubifs/lpt.c5
-rw-r--r--fs/ubifs/recovery.c2
-rw-r--r--fs/ubifs/replay.c3
-rw-r--r--fs/ubifs/super.c3
-rw-r--r--fs/udf/inode.c5
-rw-r--r--fs/udf/super.c7
-rw-r--r--fs/xfs/xfs_discard.c6
-rw-r--r--fs/xfs/xfs_ialloc.c17
-rw-r--r--fs/xfs/xfs_rtalloc.c2
-rw-r--r--include/asm-generic/mutex-xchg.h11
-rw-r--r--include/drm/drm_crtc.h5
-rw-r--r--include/drm/drm_mode.h5
-rw-r--r--include/linux/blkdev.h14
-rw-r--r--include/linux/compaction.h4
-rw-r--r--include/linux/cpuidle.h4
-rw-r--r--include/linux/efi.h2
-rw-r--r--include/linux/if_team.h30
-rw-r--r--include/linux/kref.h18
-rw-r--r--include/linux/ktime.h7
-rw-r--r--include/linux/mv643xx_eth.h2
-rw-r--r--include/linux/netdevice.h5
-rw-r--r--include/linux/netfilter/nf_conntrack_sip.h2
-rw-r--r--include/linux/netpoll.h42
-rw-r--r--include/linux/nfs_page.h1
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/of.h7
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/pinctrl/consumer.h1
-rw-r--r--include/linux/string.h2
-rw-r--r--include/linux/time.h29
-rw-r--r--include/net/llc.h2
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h1
-rw-r--r--include/net/scm.h4
-rw-r--r--include/net/xfrm.h2
-rw-r--r--include/sound/pcm.h3
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--include/xen/events.h2
-rw-r--r--init/main.c8
-rw-r--r--ipc/mqueue.c61
-rw-r--r--kernel/audit_tree.c19
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/sched/core.c35
-rw-r--r--kernel/sched/fair.c11
-rw-r--r--kernel/sched/rt.c13
-rw-r--r--kernel/sched/sched.h8
-rw-r--r--kernel/sched/stop_task.c22
-rw-r--r--kernel/task_work.c1
-rw-r--r--kernel/time/timekeeping.c39
-rw-r--r--kernel/timer.c9
-rw-r--r--kernel/trace/trace_syscalls.c4
-rw-r--r--mm/compaction.c156
-rw-r--r--mm/filemap.c7
-rw-r--r--mm/internal.h1
-rw-r--r--mm/mmap.c7
-rw-r--r--mm/page_alloc.c38
-rw-r--r--mm/slab.c1
-rw-r--r--net/8021q/vlan_dev.c52
-rw-r--r--net/atm/common.c1
-rw-r--r--net/atm/pvc.c1
-rw-r--r--net/bluetooth/hci_event.c28
-rw-r--r--net/bluetooth/hci_sock.c2
-rw-r--r--net/bluetooth/l2cap_core.c1
-rw-r--r--net/bluetooth/l2cap_sock.c3
-rw-r--r--net/bluetooth/rfcomm/sock.c2
-rw-r--r--net/bluetooth/rfcomm/tty.c2
-rw-r--r--net/bluetooth/sco.c19
-rw-r--r--net/bluetooth/smp.c5
-rw-r--r--net/bridge/br_device.c30
-rw-r--r--net/bridge/br_forward.c2
-rw-r--r--net/bridge/br_if.c6
-rw-r--r--net/bridge/br_private.h4
-rw-r--r--net/caif/chnl_net.c4
-rw-r--r--net/ceph/ceph_common.c1
-rw-r--r--net/ceph/debugfs.c4
-rw-r--r--net/ceph/messenger.c11
-rw-r--r--net/ceph/mon_client.c51
-rw-r--r--net/core/dev.c17
-rw-r--r--net/core/netpoll.c99
-rw-r--r--net/core/netprio_cgroup.c30
-rw-r--r--net/core/scm.c4
-rw-r--r--net/dccp/ccid.h4
-rw-r--r--net/dccp/ccids/ccid3.c1
-rw-r--r--net/ipv4/inet_connection_sock.c7
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ipmr.c14
-rw-r--r--net/ipv4/netfilter/nf_nat_sip.c14
-rw-r--r--net/ipv4/route.c7
-rw-r--r--net/ipv4/tcp_input.c15
-rw-r--r--net/ipv4/tcp_ipv4.c9
-rw-r--r--net/ipv4/tcp_minisocks.c2
-rw-r--r--net/ipv4/tcp_output.c14
-rw-r--r--net/ipv4/tcp_timer.c6
-rw-r--r--net/ipv6/addrconf.c4
-rw-r--r--net/ipv6/esp6.c6
-rw-r--r--net/ipv6/proc.c4
-rw-r--r--net/ipv6/tcp_ipv6.c25
-rw-r--r--net/ipv6/xfrm6_policy.c8
-rw-r--r--net/l2tp/l2tp_core.c3
-rw-r--r--net/l2tp/l2tp_core.h1
-rw-r--r--net/l2tp/l2tp_ip6.c1
-rw-r--r--net/llc/af_llc.c8
-rw-r--r--net/llc/llc_input.c21
-rw-r--r--net/llc/llc_station.c23
-rw-r--r--net/mac80211/tx.c38
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c5
-rw-r--r--net/netfilter/nf_conntrack_core.c16
-rw-r--r--net/netfilter/nf_conntrack_expect.c29
-rw-r--r--net/netfilter/nf_conntrack_netlink.c10
-rw-r--r--net/netfilter/nf_conntrack_sip.c92
-rw-r--r--net/netfilter/nfnetlink_log.c6
-rw-r--r--net/netlink/af_netlink.c6
-rw-r--r--net/packet/af_packet.c9
-rw-r--r--net/sched/act_mirred.c11
-rw-r--r--net/socket.c1
-rw-r--r--net/sunrpc/svc_xprt.c10
-rw-r--r--net/sunrpc/svcsock.c2
-rw-r--r--net/unix/af_unix.c4
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_state.c4
-rwxr-xr-xscripts/checkpatch.pl3
-rw-r--r--sound/arm/pxa2xx-ac97.c4
-rw-r--r--sound/atmel/abdac.c3
-rw-r--r--sound/atmel/ac97c.c14
-rw-r--r--sound/drivers/aloop.c2
-rw-r--r--sound/drivers/dummy.c2
-rw-r--r--sound/drivers/pcsp/pcsp.c4
-rw-r--r--sound/isa/als100.c2
-rw-r--r--sound/oss/sb_audio.c4
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c2
-rw-r--r--sound/pci/ctxfi/ctatc.c4
-rw-r--r--sound/pci/hda/hda_beep.c29
-rw-r--r--sound/pci/hda/hda_codec.c73
-rw-r--r--sound/pci/hda/hda_codec.h1
-rw-r--r--sound/pci/hda/hda_intel.c9
-rw-r--r--sound/pci/hda/hda_proc.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c174
-rw-r--r--sound/pci/hda/patch_sigmatel.c9
-rw-r--r--sound/pci/hda/patch_via.c8
-rw-r--r--sound/pci/lx6464es/lx6464es.c2
-rw-r--r--sound/pci/rme9652/hdspm.c2
-rw-r--r--sound/pci/sis7019.c5
-rw-r--r--sound/ppc/powermac.c2
-rw-r--r--sound/ppc/snd_ps3.c1
-rw-r--r--sound/soc/blackfin/bf6xx-sport.c7
-rw-r--r--sound/soc/codecs/wm5102.c25
-rw-r--r--sound/soc/codecs/wm5110.c12
-rw-r--r--sound/soc/codecs/wm8962.c15
-rw-r--r--sound/soc/codecs/wm8994.c2
-rw-r--r--sound/soc/codecs/wm9712.c21
-rw-r--r--sound/soc/davinci/davinci-mcasp.c10
-rw-r--r--sound/soc/fsl/imx-ssi.c5
-rw-r--r--sound/soc/mxs/Kconfig2
-rw-r--r--sound/soc/omap/mcbsp.c2
-rw-r--r--sound/soc/samsung/pcm.c2
-rw-r--r--sound/soc/soc-core.c10
-rw-r--r--sound/soc/soc-jack.c2
-rw-r--r--sound/usb/endpoint.c4
-rw-r--r--sound/usb/pcm.c3
-rw-r--r--tools/perf/util/python-ext-sources2
-rw-r--r--virt/kvm/kvm_main.c7
591 files changed, 5748 insertions, 4638 deletions
diff --git a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
index 814b01354c41..b31e782bd985 100644
--- a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
@@ -5,4 +5,15 @@ Contact: "Ike Panhc <ike.pan@canonical.com>"
5Description: 5Description:
6 Control the power of camera module. 1 means on, 0 means off. 6 Control the power of camera module. 1 means on, 0 means off.
7 7
8What: /sys/devices/platform/ideapad/fan_mode
9Date: June 2012
10KernelVersion: 3.6
11Contact: "Maxim Mikityanskiy <maxtram95@gmail.com>"
12Description:
13 Change fan mode
14 There are four available modes:
15 * 0 -> Super Silent Mode
16 * 1 -> Standard Mode
17 * 2 -> Dust Cleaning
18 * 4 -> Efficient Thermal Dissipation Mode
8 19
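(Editorial sketch, not part of the patch.) The new fan_mode attribute is an ordinary sysfs file, so user space can drive it with plain file I/O. A minimal illustration, assuming the path from the ABI entry above and sufficient privileges; the mode values are the ones listed in the description:

/* Illustrative only: set the ideapad fan mode via sysfs. */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/devices/platform/ideapad/fan_mode";
	int mode = (argc > 1) ? atoi(argv[1]) : 0;	/* 0 = Super Silent Mode */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%d\n", mode);	/* 0, 1, 2 or 4, as documented above */
	fclose(f);
	return 0;
}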
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
index 720395127904..701138f1209d 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
@@ -125,7 +125,7 @@ the structure refers to a radio tuner the
125<constant>V4L2_TUNER_CAP_NORM</constant> flags can't be used.</para> 125<constant>V4L2_TUNER_CAP_NORM</constant> flags can't be used.</para>
126<para>If multiple frequency bands are supported, then 126<para>If multiple frequency bands are supported, then
127<structfield>capability</structfield> is the union of all 127<structfield>capability</structfield> is the union of all
128<structfield>capability></structfield> fields of each &v4l2-frequency-band;. 128<structfield>capability</structfield> fields of each &v4l2-frequency-band;.
129</para></entry> 129</para></entry>
130 </row> 130 </row>
131 <row> 131 <row>
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index d111e3b23db0..d18ecd827c40 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -3,15 +3,21 @@
3biodoc.txt 3biodoc.txt
4 - Notes on the Generic Block Layer Rewrite in Linux 2.5 4 - Notes on the Generic Block Layer Rewrite in Linux 2.5
5capability.txt 5capability.txt
6 - Generic Block Device Capability (/sys/block/<disk>/capability) 6 - Generic Block Device Capability (/sys/block/<device>/capability)
7cfq-iosched.txt
8 - CFQ IO scheduler tunables
9data-integrity.txt
10 - Block data integrity
7deadline-iosched.txt 11deadline-iosched.txt
8 - Deadline IO scheduler tunables 12 - Deadline IO scheduler tunables
9ioprio.txt 13ioprio.txt
10 - Block io priorities (in CFQ scheduler) 14 - Block io priorities (in CFQ scheduler)
15queue-sysfs.txt
16 - Queue's sysfs entries
11request.txt 17request.txt
12 - The members of struct request (in include/linux/blkdev.h) 18 - The members of struct request (in include/linux/blkdev.h)
13stat.txt 19stat.txt
14 - Block layer statistics in /sys/block/<dev>/stat 20 - Block layer statistics in /sys/block/<device>/stat
15switching-sched.txt 21switching-sched.txt
16 - Switching I/O schedulers at runtime 22 - Switching I/O schedulers at runtime
17writeback_cache_control.txt 23writeback_cache_control.txt
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
index 6d670f570451..d89b4fe724d7 100644
--- a/Documentation/block/cfq-iosched.txt
+++ b/Documentation/block/cfq-iosched.txt
@@ -1,3 +1,14 @@
1CFQ (Complete Fairness Queueing)
2===============================
3
4The main aim of the CFQ scheduler is to provide a fair allocation of the disk
5I/O bandwidth to all processes that request I/O operations.
6
7CFQ maintains a per-process queue for processes that issue synchronous I/O
8requests. Asynchronous requests, on the other hand, are batched together
9across all processes, according to the I/O priority of the issuing
10process.
11
1CFQ ioscheduler tunables 12CFQ ioscheduler tunables
2======================== 13========================
3 14
@@ -25,6 +36,72 @@ there are multiple spindles behind single LUN (Host based hardware RAID
25controller or for storage arrays), setting slice_idle=0 might end up in better 36controller or for storage arrays), setting slice_idle=0 might end up in better
26throughput and acceptable latencies. 37throughput and acceptable latencies.
27 38
39back_seek_max
40-------------
41This specifies, in Kbytes, the maximum "distance" for backward seeking.
42The distance is measured from the current head location back to sectors
43that lie behind it.
44
45This parameter allows the scheduler to anticipate requests in the "backward"
46direction and consider them as being the "next" if they are within this
47distance from the current head location.
48
49back_seek_penalty
50-----------------
51This parameter is used to compute the cost of backward seeking. If the
52backward distance of a request is only 1/back_seek_penalty of the distance to
53a "front" request, the seek costs of the two requests are considered equal.
54
55In that case the scheduler will not bias toward either request (otherwise it
56would bias toward the front request). The default value of back_seek_penalty is 2.
57
58fifo_expire_async
59-----------------
60This parameter is used to set the timeout of asynchronous requests. The
61default value is 248ms.
62
63fifo_expire_sync
64----------------
65This parameter is used to set the timeout of synchronous requests. The
66default value is 124ms. To favor synchronous requests over asynchronous
67ones, decrease this value relative to fifo_expire_async.
68
69slice_async
70-----------
71This parameter is the same as slice_sync, but for the asynchronous queue. The
72default value is 40ms.
73
74slice_async_rq
75--------------
76This parameter limits how many asynchronous requests are dispatched to the
77device request queue within a queue's slice time. The maximum number of
78requests that may be dispatched also depends on the I/O priority. The default
79value is 2.
80
81slice_sync
82----------
83When a queue is selected for execution, the queue's I/O requests are only
84executed for a certain amount of time (time_slice) before switching to another
85queue. This parameter is used to calculate the time slice of the synchronous
86queue.
87
88time_slice is computed using the following equation:
89time_slice = slice_sync + (slice_sync/5 * (4 - prio)). To increase the
90time_slice of the synchronous queue, increase the value of slice_sync. The
91default value is 100ms.
92
93quantum
94-------
95This specifies the number of requests dispatched to the device queue. Within a
96queue's time slice, a request will not be dispatched if the number of requests
97already in the device exceeds this parameter. This parameter applies to
98synchronous requests.
99
100For storage with several disks, this setting can limit the parallel
101processing of requests. Increasing the value can therefore improve
102performance, although it can also increase the latency of some I/O because
103more requests are queued.
104
28CFQ IOPS Mode for group scheduling 105CFQ IOPS Mode for group scheduling
29=================================== 106===================================
30Basic CFQ design is to provide priority based time slices. Higher priority 107Basic CFQ design is to provide priority based time slices. Higher priority
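(Editorial sketch, not part of the patch.) The slice_sync section above quotes time_slice = slice_sync + (slice_sync/5 * (4 - prio)); a small stand-alone program that evaluates the formula for the default slice_sync of 100ms across the eight CFQ priority levels:

/* Illustrative only: evaluate the CFQ time_slice formula quoted above. */
#include <stdio.h>

int main(void)
{
	int slice_sync = 100;	/* default, in ms */

	for (int prio = 0; prio <= 7; prio++) {
		int time_slice = slice_sync + (slice_sync / 5) * (4 - prio);
		printf("prio %d -> time_slice %d ms\n", prio, time_slice);
	}
	return 0;	/* prio 0 gives 180ms, prio 7 gives 40ms */
}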
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 6518a55273e7..e54ac1d53403 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -9,20 +9,71 @@ These files are the ones found in the /sys/block/xxx/queue/ directory.
9Files denoted with a RO postfix are readonly and the RW postfix means 9Files denoted with a RO postfix are readonly and the RW postfix means
10read-write. 10read-write.
11 11
12add_random (RW)
13----------------
14This file allows turning off the disk's entropy contribution. The default
15value of this file is '1' (on).
16
17discard_granularity (RO)
18-----------------------
19This shows the size of internal allocation of the device in bytes, if
20reported by the device. A value of '0' means device does not support
21the discard functionality.
22
23discard_max_bytes (RO)
24----------------------
25Devices that support discard functionality may have internal limits on
26the number of bytes that can be trimmed or unmapped in a single operation.
27The discard_max_bytes parameter is set by the device driver to the maximum
28number of bytes that can be discarded in a single operation. Discard
29requests issued to the device must not exceed this limit. A discard_max_bytes
30value of 0 means that the device does not support discard functionality.
31
32discard_zeroes_data (RO)
33------------------------
34When read, this file will show if the discarded block are zeroed by the
35device or not. If its value is '1' the blocks are zeroed otherwise not.
36
12hw_sector_size (RO) 37hw_sector_size (RO)
13------------------- 38-------------------
14This is the hardware sector size of the device, in bytes. 39This is the hardware sector size of the device, in bytes.
15 40
41iostats (RW)
42-------------
43This file is used to control (on/off) the iostats accounting of the
44disk.
45
46logical_block_size (RO)
47-----------------------
48This is the logical block size of the device, in bytes.
49
16max_hw_sectors_kb (RO) 50max_hw_sectors_kb (RO)
17---------------------- 51----------------------
18This is the maximum number of kilobytes supported in a single data transfer. 52This is the maximum number of kilobytes supported in a single data transfer.
19 53
54max_integrity_segments (RO)
55---------------------------
56When read, this file shows the maximum number of integrity segments, as
57set by the block layer, that the hardware controller can handle.
58
20max_sectors_kb (RW) 59max_sectors_kb (RW)
21------------------- 60-------------------
22This is the maximum number of kilobytes that the block layer will allow 61This is the maximum number of kilobytes that the block layer will allow
23for a filesystem request. Must be smaller than or equal to the maximum 62for a filesystem request. Must be smaller than or equal to the maximum
24size allowed by the hardware. 63size allowed by the hardware.
25 64
65max_segments (RO)
66-----------------
67Maximum number of segments of the device.
68
69max_segment_size (RO)
70---------------------
71Maximum segment size of the device.
72
73minimum_io_size (RO)
74--------------------
75This is the smallest preferred io size reported by the device.
76
26nomerges (RW) 77nomerges (RW)
27------------- 78-------------
28This enables the user to disable the lookup logic involved with IO 79This enables the user to disable the lookup logic involved with IO
@@ -45,11 +96,24 @@ per-block-cgroup request pool. IOW, if there are N block cgroups,
45each request queue may have upto N request pools, each independently 96each request queue may have upto N request pools, each independently
46regulated by nr_requests. 97regulated by nr_requests.
47 98
99optimal_io_size (RO)
100--------------------
101This is the optimal io size reported by the device.
102
103physical_block_size (RO)
104------------------------
105This is the physical block size of device, in bytes.
106
48read_ahead_kb (RW) 107read_ahead_kb (RW)
49------------------ 108------------------
50Maximum number of kilobytes to read-ahead for filesystems on this block 109Maximum number of kilobytes to read-ahead for filesystems on this block
51device. 110device.
52 111
112rotational (RW)
113---------------
114This file is used to indicate whether the device is rotational or
115non-rotational.
116
53rq_affinity (RW) 117rq_affinity (RW)
54---------------- 118----------------
55If this option is '1', the block layer will migrate request completions to the 119If this option is '1', the block layer will migrate request completions to the
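(Editorial sketch, not part of the patch.) Every attribute documented above is a small text file under /sys/block/<dev>/queue/; a minimal program that dumps a few of them, where the device name and the chosen attributes are assumptions:

/* Illustrative only: print a few queue attributes described above. */
#include <stdio.h>

static void show(const char *dev, const char *attr)
{
	char path[256], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", dev, attr);
	f = fopen(path, "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("%-20s %s", attr, buf);
	if (f)
		fclose(f);
}

int main(int argc, char **argv)
{
	const char *dev = (argc > 1) ? argv[1] : "sda";	/* assumed device */

	show(dev, "rotational");
	show(dev, "discard_max_bytes");
	show(dev, "max_sectors_kb");
	return 0;
}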
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 70cd49b1caa8..1dd622546d06 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -10,8 +10,8 @@ Required properties:
10- compatible : Should be "fsl,<chip>-esdhc" 10- compatible : Should be "fsl,<chip>-esdhc"
11 11
12Optional properties: 12Optional properties:
13- fsl,cd-internal : Indicate to use controller internal card detection 13- fsl,cd-controller : Indicate to use controller internal card detection
14- fsl,wp-internal : Indicate to use controller internal write protection 14- fsl,wp-controller : Indicate to use controller internal write protection
15 15
16Examples: 16Examples:
17 17
@@ -19,8 +19,8 @@ esdhc@70004000 {
19 compatible = "fsl,imx51-esdhc"; 19 compatible = "fsl,imx51-esdhc";
20 reg = <0x70004000 0x4000>; 20 reg = <0x70004000 0x4000>;
21 interrupts = <1>; 21 interrupts = <1>;
22 fsl,cd-internal; 22 fsl,cd-controller;
23 fsl,wp-internal; 23 fsl,wp-controller;
24}; 24};
25 25
26esdhc@70008000 { 26esdhc@70008000 {
diff --git a/Documentation/devicetree/bindings/regulator/tps6586x.txt b/Documentation/devicetree/bindings/regulator/tps6586x.txt
index d156e1b5db12..da80c2ae0915 100644
--- a/Documentation/devicetree/bindings/regulator/tps6586x.txt
+++ b/Documentation/devicetree/bindings/regulator/tps6586x.txt
@@ -9,9 +9,9 @@ Required properties:
9- regulators: list of regulators provided by this controller, must have 9- regulators: list of regulators provided by this controller, must have
10 property "regulator-compatible" to match their hardware counterparts: 10 property "regulator-compatible" to match their hardware counterparts:
11 sm[0-2], ldo[0-9] and ldo_rtc 11 sm[0-2], ldo[0-9] and ldo_rtc
12- sm0-supply: The input supply for the SM0. 12- vin-sm0-supply: The input supply for the SM0.
13- sm1-supply: The input supply for the SM1. 13- vin-sm1-supply: The input supply for the SM1.
14- sm2-supply: The input supply for the SM2. 14- vin-sm2-supply: The input supply for the SM2.
15- vinldo01-supply: The input supply for the LDO1 and LDO2 15- vinldo01-supply: The input supply for the LDO1 and LDO2
16- vinldo23-supply: The input supply for the LDO2 and LDO3 16- vinldo23-supply: The input supply for the LDO2 and LDO3
17- vinldo4-supply: The input supply for the LDO4 17- vinldo4-supply: The input supply for the LDO4
@@ -30,9 +30,9 @@ Example:
30 #gpio-cells = <2>; 30 #gpio-cells = <2>;
31 gpio-controller; 31 gpio-controller;
32 32
33 sm0-supply = <&some_reg>; 33 vin-sm0-supply = <&some_reg>;
34 sm1-supply = <&some_reg>; 34 vin-sm1-supply = <&some_reg>;
35 sm2-supply = <&some_reg>; 35 vin-sm2-supply = <&some_reg>;
36 vinldo01-supply = <...>; 36 vinldo01-supply = <...>;
37 vinldo23-supply = <...>; 37 vinldo23-supply = <...>;
38 vinldo4-supply = <...>; 38 vinldo4-supply = <...>;
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
index ead764b2728f..de1e6c4dccff 100644
--- a/Documentation/filesystems/vfat.txt
+++ b/Documentation/filesystems/vfat.txt
@@ -137,6 +137,17 @@ errors=panic|continue|remount-ro
137 without doing anything or remount the partition in 137 without doing anything or remount the partition in
138 read-only mode (default behavior). 138 read-only mode (default behavior).
139 139
140discard -- If set, issues discard/TRIM commands to the block
141 device when blocks are freed. This is useful for SSD devices
142 and sparse/thinly-provisioned LUNs.
143
144nfs -- This option maintains an index (cache) of directory
145 inodes by i_logstart which is used by the nfs-related code to
146 improve look-ups.
147
148 Enable this only if you want to export the FAT filesystem
149 over NFS
150
140<bool>: 0,1,yes,no,true,false 151<bool>: 0,1,yes,no,true,false
141 152
142TODO 153TODO
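(Editorial sketch, not part of the patch.) The new options are passed like any other vfat mount option; a hedged example using mount(2) directly, where the device node and mount point are assumptions:

/* Illustrative only: mount a vfat filesystem with the "discard" option. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* /dev/sdb1 and /mnt are placeholders for this sketch. */
	if (mount("/dev/sdb1", "/mnt", "vfat", 0, "discard") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}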
diff --git a/Documentation/networking/netconsole.txt b/Documentation/networking/netconsole.txt
index 8d022073e3ef..2e9e0ae2cd45 100644
--- a/Documentation/networking/netconsole.txt
+++ b/Documentation/networking/netconsole.txt
@@ -51,8 +51,23 @@ Built-in netconsole starts immediately after the TCP stack is
51initialized and attempts to bring up the supplied dev at the supplied 51initialized and attempts to bring up the supplied dev at the supplied
52address. 52address.
53 53
54The remote host can run either 'netcat -u -l -p <port>', 54The remote host has several options to receive the kernel messages,
55'nc -l -u <port>' or syslogd. 55for example:
56
571) syslogd
58
592) netcat
60
61 On distributions using a BSD-based netcat version (e.g. Fedora,
62 openSUSE and Ubuntu) the listening port must be specified without
63 the -p switch:
64
65 'nc -u -l -p <port>' / 'nc -u -l <port>' or
66 'netcat -u -l -p <port>' / 'netcat -u -l <port>'
67
683) socat
69
70 'socat udp-recv:<port> -'
56 71
57Dynamic reconfiguration: 72Dynamic reconfiguration:
58======================== 73========================
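(Editorial sketch, not part of the patch.) Anything that listens on the configured UDP port receives the messages; a minimal C receiver equivalent to the netcat/socat invocations above, with port 6666 (the usual netconsole default) taken as an assumption:

/* Illustrative only: receive netconsole messages on UDP port 6666. */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(6666),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	char buf[1024];
	ssize_t n;

	if (s < 0 || bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}
	while ((n = recv(s, buf, sizeof(buf) - 1, 0)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* each kernel message arrives as a datagram */
	}
	close(s);
	return 0;
}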
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index e40f4b4e1977..1479aca23744 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -840,9 +840,9 @@ static unsigned long i2c_pin_configs[] = {
840 840
841static struct pinctrl_map __initdata mapping[] = { 841static struct pinctrl_map __initdata mapping[] = {
842 PIN_MAP_MUX_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", "i2c0"), 842 PIN_MAP_MUX_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", "i2c0"),
843 PIN_MAP_MUX_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs), 843 PIN_MAP_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs),
844 PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs), 844 PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs),
845 PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs), 845 PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs),
846}; 846};
847 847
848Finally, some devices expect the mapping table to contain certain specific 848Finally, some devices expect the mapping table to contain certain specific
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index f8551b3879f8..4ac359b7aa17 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -299,11 +299,17 @@ map_hugetlb.c.
299******************************************************************* 299*******************************************************************
300 300
301/* 301/*
302 * hugepage-shm: see Documentation/vm/hugepage-shm.c 302 * map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c
303 */ 303 */
304 304
305******************************************************************* 305*******************************************************************
306 306
307/* 307/*
308 * hugepage-mmap: see Documentation/vm/hugepage-mmap.c 308 * hugepage-shm: see tools/testing/selftests/vm/hugepage-shm.c
309 */
310
311*******************************************************************
312
313/*
314 * hugepage-mmap: see tools/testing/selftests/vm/hugepage-mmap.c
309 */ 315 */
diff --git a/Documentation/watchdog/src/watchdog-test.c b/Documentation/watchdog/src/watchdog-test.c
index 73ff5cc93e05..3da822967ee0 100644
--- a/Documentation/watchdog/src/watchdog-test.c
+++ b/Documentation/watchdog/src/watchdog-test.c
@@ -31,7 +31,7 @@ static void keep_alive(void)
31 * or "-e" to enable the card. 31 * or "-e" to enable the card.
32 */ 32 */
33 33
34void term(int sig) 34static void term(int sig)
35{ 35{
36 close(fd); 36 close(fd);
37 fprintf(stderr, "Stopping watchdog ticks...\n"); 37 fprintf(stderr, "Stopping watchdog ticks...\n");
diff --git a/MAINTAINERS b/MAINTAINERS
index 3aed8325a902..fdc0119963e7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7288,6 +7288,12 @@ W: http://www.connecttech.com
7288S: Supported 7288S: Supported
7289F: drivers/usb/serial/whiteheat* 7289F: drivers/usb/serial/whiteheat*
7290 7290
7291USB SMSC75XX ETHERNET DRIVER
7292M: Steve Glendinning <steve.glendinning@shawell.net>
7293L: netdev@vger.kernel.org
7294S: Maintained
7295F: drivers/net/usb/smsc75xx.*
7296
7291USB SMSC95XX ETHERNET DRIVER 7297USB SMSC95XX ETHERNET DRIVER
7292M: Steve Glendinning <steve.glendinning@shawell.net> 7298M: Steve Glendinning <steve.glendinning@shawell.net>
7293L: netdev@vger.kernel.org 7299L: netdev@vger.kernel.org
@@ -7670,23 +7676,28 @@ S: Supported
7670F: Documentation/hwmon/wm83?? 7676F: Documentation/hwmon/wm83??
7671F: arch/arm/mach-s3c64xx/mach-crag6410* 7677F: arch/arm/mach-s3c64xx/mach-crag6410*
7672F: drivers/clk/clk-wm83*.c 7678F: drivers/clk/clk-wm83*.c
7679F: drivers/extcon/extcon-arizona.c
7673F: drivers/leds/leds-wm83*.c 7680F: drivers/leds/leds-wm83*.c
7674F: drivers/gpio/gpio-*wm*.c 7681F: drivers/gpio/gpio-*wm*.c
7682F: drivers/gpio/gpio-arizona.c
7675F: drivers/hwmon/wm83??-hwmon.c 7683F: drivers/hwmon/wm83??-hwmon.c
7676F: drivers/input/misc/wm831x-on.c 7684F: drivers/input/misc/wm831x-on.c
7677F: drivers/input/touchscreen/wm831x-ts.c 7685F: drivers/input/touchscreen/wm831x-ts.c
7678F: drivers/input/touchscreen/wm97*.c 7686F: drivers/input/touchscreen/wm97*.c
7679F: drivers/mfd/wm8*.c 7687F: drivers/mfd/arizona*
7688F: drivers/mfd/wm*.c
7680F: drivers/power/wm83*.c 7689F: drivers/power/wm83*.c
7681F: drivers/rtc/rtc-wm83*.c 7690F: drivers/rtc/rtc-wm83*.c
7682F: drivers/regulator/wm8*.c 7691F: drivers/regulator/wm8*.c
7683F: drivers/video/backlight/wm83*_bl.c 7692F: drivers/video/backlight/wm83*_bl.c
7684F: drivers/watchdog/wm83*_wdt.c 7693F: drivers/watchdog/wm83*_wdt.c
7694F: include/linux/mfd/arizona/
7685F: include/linux/mfd/wm831x/ 7695F: include/linux/mfd/wm831x/
7686F: include/linux/mfd/wm8350/ 7696F: include/linux/mfd/wm8350/
7687F: include/linux/mfd/wm8400* 7697F: include/linux/mfd/wm8400*
7688F: include/linux/wm97xx.h 7698F: include/linux/wm97xx.h
7689F: include/sound/wm????.h 7699F: include/sound/wm????.h
7700F: sound/soc/codecs/arizona.?
7690F: sound/soc/codecs/wm* 7701F: sound/soc/codecs/wm*
7691 7702
7692WORKQUEUE 7703WORKQUEUE
diff --git a/Makefile b/Makefile
index 9cc77acfc881..371ce8899f5c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc4
5NAME = Saber-toothed Squirrel 5NAME = Saber-toothed Squirrel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index d5b9b5e645cc..9944dedee5b1 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -18,6 +18,8 @@ config ALPHA
18 select ARCH_HAVE_NMI_SAFE_CMPXCHG 18 select ARCH_HAVE_NMI_SAFE_CMPXCHG
19 select GENERIC_SMP_IDLE_THREAD 19 select GENERIC_SMP_IDLE_THREAD
20 select GENERIC_CMOS_UPDATE 20 select GENERIC_CMOS_UPDATE
21 select GENERIC_STRNCPY_FROM_USER
22 select GENERIC_STRNLEN_USER
21 help 23 help
22 The Alpha is a 64-bit general-purpose processor designed and 24 The Alpha is a 64-bit general-purpose processor designed and
23 marketed by the Digital Equipment Corporation of blessed memory, 25 marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 3bb7ffeae3bc..c2cbe4fc391c 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,8 +14,8 @@
14 */ 14 */
15 15
16 16
17#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) 17#define ATOMIC_INIT(i) { (i) }
18#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) 18#define ATOMIC64_INIT(i) { (i) }
19 19
20#define atomic_read(v) (*(volatile int *)&(v)->counter) 20#define atomic_read(v) (*(volatile int *)&(v)->counter)
21#define atomic64_read(v) (*(volatile long *)&(v)->counter) 21#define atomic64_read(v) (*(volatile long *)&(v)->counter)
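(Editorial aside, not part of the patch.) The old ATOMIC_INIT form wrapped the value in a compound literal, which is not a constant expression in ISO C and is rejected by some compilers when used to initialize static data; the plain brace form works everywhere. A hedged user-space mock-up of the difference, with atomic_t redefined locally just for the demo:

/* Illustrative mock-up only; not kernel code. */
typedef struct { int counter; } atomic_t;

#define ATOMIC_INIT_OLD(i)	( (atomic_t) { (i) } )	/* compound literal */
#define ATOMIC_INIT_NEW(i)	{ (i) }			/* plain initializer */

static atomic_t good = ATOMIC_INIT_NEW(1);	/* accepted everywhere */
/* static atomic_t bad = ATOMIC_INIT_OLD(1);
 *	-- "initializer element is not constant" under strict ISO C rules */

int main(void)
{
	return good.counter - 1;	/* exits 0 */
}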
diff --git a/arch/alpha/include/asm/fpu.h b/arch/alpha/include/asm/fpu.h
index db00f7885faa..e477bcd5b94a 100644
--- a/arch/alpha/include/asm/fpu.h
+++ b/arch/alpha/include/asm/fpu.h
@@ -1,7 +1,9 @@
1#ifndef __ASM_ALPHA_FPU_H 1#ifndef __ASM_ALPHA_FPU_H
2#define __ASM_ALPHA_FPU_H 2#define __ASM_ALPHA_FPU_H
3 3
4#ifdef __KERNEL__
4#include <asm/special_insns.h> 5#include <asm/special_insns.h>
6#endif
5 7
6/* 8/*
7 * Alpha floating-point control register defines: 9 * Alpha floating-point control register defines:
diff --git a/arch/alpha/include/asm/ptrace.h b/arch/alpha/include/asm/ptrace.h
index fd698a174f26..b87755a19554 100644
--- a/arch/alpha/include/asm/ptrace.h
+++ b/arch/alpha/include/asm/ptrace.h
@@ -76,7 +76,10 @@ struct switch_stack {
76#define task_pt_regs(task) \ 76#define task_pt_regs(task) \
77 ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1) 77 ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
78 78
79#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0) 79#define current_pt_regs() \
80 ((struct pt_regs *) ((char *)current_thread_info() + 2*PAGE_SIZE) - 1)
81
82#define force_successful_syscall_return() (current_pt_regs()->r0 = 0)
80 83
81#endif 84#endif
82 85
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
index dcb221a4b5be..7d2f75be932e 100644
--- a/arch/alpha/include/asm/socket.h
+++ b/arch/alpha/include/asm/socket.h
@@ -76,9 +76,11 @@
76/* Instruct lower device to use last 4-bytes of skb data as FCS */ 76/* Instruct lower device to use last 4-bytes of skb data as FCS */
77#define SO_NOFCS 43 77#define SO_NOFCS 43
78 78
79#ifdef __KERNEL__
79/* O_NONBLOCK clashes with the bits used for socket types. Therefore we 80/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
80 * have to define SOCK_NONBLOCK to a different value here. 81 * have to define SOCK_NONBLOCK to a different value here.
81 */ 82 */
82#define SOCK_NONBLOCK 0x40000000 83#define SOCK_NONBLOCK 0x40000000
84#endif /* __KERNEL__ */
83 85
84#endif /* _ASM_SOCKET_H */ 86#endif /* _ASM_SOCKET_H */
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index b49ec2f8d6e3..766fdfde2b7a 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -433,36 +433,12 @@ clear_user(void __user *to, long len)
433#undef __module_address 433#undef __module_address
434#undef __module_call 434#undef __module_call
435 435
436/* Returns: -EFAULT if exception before terminator, N if the entire 436#define user_addr_max() \
437 buffer filled, else strlen. */ 437 (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
438 438
439extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len); 439extern long strncpy_from_user(char *dest, const char __user *src, long count);
440 440extern __must_check long strlen_user(const char __user *str);
441extern inline long 441extern __must_check long strnlen_user(const char __user *str, long n);
442strncpy_from_user(char *to, const char __user *from, long n)
443{
444 long ret = -EFAULT;
445 if (__access_ok((unsigned long)from, 0, get_fs()))
446 ret = __strncpy_from_user(to, from, n);
447 return ret;
448}
449
450/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
451extern long __strlen_user(const char __user *);
452
453extern inline long strlen_user(const char __user *str)
454{
455 return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
456}
457
458/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
459 * a value greater than N if the limit would be exceeded, else strlen. */
460extern long __strnlen_user(const char __user *, long);
461
462extern inline long strnlen_user(const char __user *str, long n)
463{
464 return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
465}
466 442
467/* 443/*
468 * About the exception table: 444 * About the exception table:
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 633b23b0664a..a31a78eac9b9 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -465,10 +465,12 @@
465#define __NR_setns 501 465#define __NR_setns 501
466#define __NR_accept4 502 466#define __NR_accept4 502
467#define __NR_sendmmsg 503 467#define __NR_sendmmsg 503
468#define __NR_process_vm_readv 504
469#define __NR_process_vm_writev 505
468 470
469#ifdef __KERNEL__ 471#ifdef __KERNEL__
470 472
471#define NR_SYSCALLS 504 473#define NR_SYSCALLS 506
472 474
473#define __ARCH_WANT_OLD_READDIR 475#define __ARCH_WANT_OLD_READDIR
474#define __ARCH_WANT_STAT64 476#define __ARCH_WANT_STAT64
diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..6b340d0f1521
--- /dev/null
+++ b/arch/alpha/include/asm/word-at-a-time.h
@@ -0,0 +1,55 @@
1#ifndef _ASM_WORD_AT_A_TIME_H
2#define _ASM_WORD_AT_A_TIME_H
3
4#include <asm/compiler.h>
5
6/*
7 * word-at-a-time interface for Alpha.
8 */
9
10/*
11 * We do not use the word_at_a_time struct on Alpha, but it needs to be
12 * implemented to humour the generic code.
13 */
14struct word_at_a_time {
15 const unsigned long unused;
16};
17
18#define WORD_AT_A_TIME_CONSTANTS { 0 }
19
20/* Return nonzero if val has a zero */
21static inline unsigned long has_zero(unsigned long val, unsigned long *bits, const struct word_at_a_time *c)
22{
23 unsigned long zero_locations = __kernel_cmpbge(0, val);
24 *bits = zero_locations;
25 return zero_locations;
26}
27
28static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits, const struct word_at_a_time *c)
29{
30 return bits;
31}
32
33#define create_zero_mask(bits) (bits)
34
35static inline unsigned long find_zero(unsigned long bits)
36{
37#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
38 /* Simple if have CIX instructions */
39 return __kernel_cttz(bits);
40#else
41 unsigned long t1, t2, t3;
42 /* Retain lowest set bit only */
43 bits &= -bits;
44 /* Binary search for lowest set bit */
45 t1 = bits & 0xf0;
46 t2 = bits & 0xcc;
47 t3 = bits & 0xaa;
48 if (t1) t1 = 4;
49 if (t2) t2 = 2;
50 if (t3) t3 = 1;
51 return t1 + t2 + t3;
52#endif
53}
54
55#endif /* _ASM_WORD_AT_A_TIME_H */
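(Editorial sketch, not part of the patch.) These helpers feed the generic GENERIC_STRNCPY_FROM_USER/GENERIC_STRNLEN_USER code selected in the Kconfig hunk earlier: has_zero() flags which bytes of a word are zero, and find_zero() turns that flag mask into a byte index. A hedged user-space illustration of the same idea, with __kernel_cmpbge emulated by a plain per-byte loop and a 64-bit unsigned long assumed, as on Alpha:

/* Illustrative mock-up only: locate the first zero byte in a word. */
#include <stdio.h>

static unsigned long mock_cmpbge_zero(unsigned long val)
{
	unsigned long bits = 0;

	for (int i = 0; i < 8; i++)
		if (((val >> (8 * i)) & 0xff) == 0)
			bits |= 1UL << i;	/* one flag bit per zero byte */
	return bits;
}

static unsigned long find_zero(unsigned long bits)
{
	unsigned long t1, t2, t3;

	bits &= -bits;			/* keep lowest set bit only */
	t1 = bits & 0xf0;
	t2 = bits & 0xcc;
	t3 = bits & 0xaa;
	if (t1) t1 = 4;
	if (t2) t2 = 2;
	if (t3) t3 = 1;
	return t1 + t2 + t3;		/* index of the first zero byte */
}

int main(void)
{
	unsigned long word = 0x0000006f6c6c6568UL;	/* "hello\0\0\0", little-endian */
	unsigned long bits = mock_cmpbge_zero(word);

	printf("first zero byte at offset %lu\n", find_zero(bits));	/* prints 5 */
	return 0;
}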
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index d96e742d4dc2..15fa821d09cd 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -52,7 +52,6 @@ EXPORT_SYMBOL(alpha_write_fp_reg_s);
52 52
53/* entry.S */ 53/* entry.S */
54EXPORT_SYMBOL(kernel_thread); 54EXPORT_SYMBOL(kernel_thread);
55EXPORT_SYMBOL(kernel_execve);
56 55
57/* Networking helper routines. */ 56/* Networking helper routines. */
58EXPORT_SYMBOL(csum_tcpudp_magic); 57EXPORT_SYMBOL(csum_tcpudp_magic);
@@ -74,8 +73,6 @@ EXPORT_SYMBOL(alpha_fp_emul);
74 */ 73 */
75EXPORT_SYMBOL(__copy_user); 74EXPORT_SYMBOL(__copy_user);
76EXPORT_SYMBOL(__do_clear_user); 75EXPORT_SYMBOL(__do_clear_user);
77EXPORT_SYMBOL(__strncpy_from_user);
78EXPORT_SYMBOL(__strnlen_user);
79 76
80/* 77/*
81 * SMP-specific symbols. 78 * SMP-specific symbols.
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index 6d159cee5f2f..ec0da0567ab5 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -663,58 +663,6 @@ kernel_thread:
663 br ret_to_kernel 663 br ret_to_kernel
664.end kernel_thread 664.end kernel_thread
665 665
666/*
667 * kernel_execve(path, argv, envp)
668 */
669 .align 4
670 .globl kernel_execve
671 .ent kernel_execve
672kernel_execve:
673 /* We can be called from a module. */
674 ldgp $gp, 0($27)
675 lda $sp, -(32+SIZEOF_PT_REGS+8)($sp)
676 .frame $sp, 32+SIZEOF_PT_REGS+8, $26, 0
677 stq $26, 0($sp)
678 stq $16, 8($sp)
679 stq $17, 16($sp)
680 stq $18, 24($sp)
681 .prologue 1
682
683 lda $16, 32($sp)
684 lda $17, 0
685 lda $18, SIZEOF_PT_REGS
686 bsr $26, memset !samegp
687
688 /* Avoid the HAE being gratuitously wrong, which would cause us
689 to do the whole turn off interrupts thing and restore it. */
690 ldq $2, alpha_mv+HAE_CACHE
691 stq $2, 152+32($sp)
692
693 ldq $16, 8($sp)
694 ldq $17, 16($sp)
695 ldq $18, 24($sp)
696 lda $19, 32($sp)
697 bsr $26, do_execve !samegp
698
699 ldq $26, 0($sp)
700 bne $0, 1f /* error! */
701
702 /* Move the temporary pt_regs struct from its current location
703 to the top of the kernel stack frame. See copy_thread for
704 details for a normal process. */
705 lda $16, 0x4000 - SIZEOF_PT_REGS($8)
706 lda $17, 32($sp)
707 lda $18, SIZEOF_PT_REGS
708 bsr $26, memmove !samegp
709
710 /* Take that over as our new stack frame and visit userland! */
711 lda $sp, 0x4000 - SIZEOF_PT_REGS($8)
712 br $31, ret_from_sys_call
713
7141: lda $sp, 32+SIZEOF_PT_REGS+8($sp)
715 ret
716.end kernel_execve
717
718 666
719/* 667/*
720 * Special system calls. Most of these are special in that they either 668 * Special system calls. Most of these are special in that they either
@@ -797,115 +745,6 @@ sys_rt_sigreturn:
797.end sys_rt_sigreturn 745.end sys_rt_sigreturn
798 746
799 .align 4 747 .align 4
800 .globl sys_sethae
801 .ent sys_sethae
802sys_sethae:
803 .prologue 0
804 stq $16, 152($sp)
805 ret
806.end sys_sethae
807
808 .align 4
809 .globl osf_getpriority
810 .ent osf_getpriority
811osf_getpriority:
812 lda $sp, -16($sp)
813 stq $26, 0($sp)
814 .prologue 0
815
816 jsr $26, sys_getpriority
817
818 ldq $26, 0($sp)
819 blt $0, 1f
820
821 /* Return value is the unbiased priority, i.e. 20 - prio.
822 This does result in negative return values, so signal
823 no error by writing into the R0 slot. */
824 lda $1, 20
825 stq $31, 16($sp)
826 subl $1, $0, $0
827 unop
828
8291: lda $sp, 16($sp)
830 ret
831.end osf_getpriority
832
833 .align 4
834 .globl sys_getxuid
835 .ent sys_getxuid
836sys_getxuid:
837 .prologue 0
838 ldq $2, TI_TASK($8)
839 ldq $3, TASK_CRED($2)
840 ldl $0, CRED_UID($3)
841 ldl $1, CRED_EUID($3)
842 stq $1, 80($sp)
843 ret
844.end sys_getxuid
845
846 .align 4
847 .globl sys_getxgid
848 .ent sys_getxgid
849sys_getxgid:
850 .prologue 0
851 ldq $2, TI_TASK($8)
852 ldq $3, TASK_CRED($2)
853 ldl $0, CRED_GID($3)
854 ldl $1, CRED_EGID($3)
855 stq $1, 80($sp)
856 ret
857.end sys_getxgid
858
859 .align 4
860 .globl sys_getxpid
861 .ent sys_getxpid
862sys_getxpid:
863 .prologue 0
864 ldq $2, TI_TASK($8)
865
866 /* See linux/kernel/timer.c sys_getppid for discussion
867 about this loop. */
868 ldq $3, TASK_GROUP_LEADER($2)
869 ldq $4, TASK_REAL_PARENT($3)
870 ldl $0, TASK_TGID($2)
8711: ldl $1, TASK_TGID($4)
872#ifdef CONFIG_SMP
873 mov $4, $5
874 mb
875 ldq $3, TASK_GROUP_LEADER($2)
876 ldq $4, TASK_REAL_PARENT($3)
877 cmpeq $4, $5, $5
878 beq $5, 1b
879#endif
880 stq $1, 80($sp)
881 ret
882.end sys_getxpid
883
884 .align 4
885 .globl sys_alpha_pipe
886 .ent sys_alpha_pipe
887sys_alpha_pipe:
888 lda $sp, -16($sp)
889 stq $26, 0($sp)
890 .prologue 0
891
892 mov $31, $17
893 lda $16, 8($sp)
894 jsr $26, do_pipe_flags
895
896 ldq $26, 0($sp)
897 bne $0, 1f
898
899 /* The return values are in $0 and $20. */
900 ldl $1, 12($sp)
901 ldl $0, 8($sp)
902
903 stq $1, 80+16($sp)
9041: lda $sp, 16($sp)
905 ret
906.end sys_alpha_pipe
907
908 .align 4
909 .globl sys_execve 748 .globl sys_execve
910 .ent sys_execve 749 .ent sys_execve
911sys_execve: 750sys_execve:
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 98a103621af6..bc1acdda7a5e 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1404,3 +1404,52 @@ SYSCALL_DEFINE3(osf_writev, unsigned long, fd,
1404} 1404}
1405 1405
1406#endif 1406#endif
1407
1408SYSCALL_DEFINE2(osf_getpriority, int, which, int, who)
1409{
1410 int prio = sys_getpriority(which, who);
1411 if (prio >= 0) {
1412 /* Return value is the unbiased priority, i.e. 20 - prio.
1413 This does result in negative return values, so signal
1414 no error */
1415 force_successful_syscall_return();
1416 prio = 20 - prio;
1417 }
1418 return prio;
1419}
1420
1421SYSCALL_DEFINE0(getxuid)
1422{
1423 current_pt_regs()->r20 = sys_geteuid();
1424 return sys_getuid();
1425}
1426
1427SYSCALL_DEFINE0(getxgid)
1428{
1429 current_pt_regs()->r20 = sys_getegid();
1430 return sys_getgid();
1431}
1432
1433SYSCALL_DEFINE0(getxpid)
1434{
1435 current_pt_regs()->r20 = sys_getppid();
1436 return sys_getpid();
1437}
1438
1439SYSCALL_DEFINE0(alpha_pipe)
1440{
1441 int fd[2];
1442 int res = do_pipe_flags(fd, 0);
1443 if (!res) {
1444 /* The return values are in $0 and $20. */
1445 current_pt_regs()->r20 = fd[1];
1446 res = fd[0];
1447 }
1448 return res;
1449}
1450
1451SYSCALL_DEFINE1(sethae, unsigned long, val)
1452{
1453 current_pt_regs()->hae = val;
1454 return 0;
1455}
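
The C replacements above deliver the second OSF/1 return value through current_pt_regs()->r20, the same pt_regs slot (offset 80, register $20) that the deleted entry.S stubs filled with "stq $1, 80($sp)". The getpriority wrapper additionally un-biases the value that sys_getpriority() keeps positive; a worked example with a hypothetical nice value:

        /*
         * Task at nice -5 (hypothetical):
         *      sys_getpriority()      returns 20 - (-5) = 25   (always >= 1)
         *      sys_osf_getpriority()  returns 20 - 25   = -5   (raw nice value)
         *
         * A bare -5 would be mistaken for -errno on the syscall exit path,
         * hence the force_successful_syscall_return() call before returning.
         */
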
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 153d3fce3e8e..d6fde98b74b3 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -455,3 +455,22 @@ get_wchan(struct task_struct *p)
455 } 455 }
456 return pc; 456 return pc;
457} 457}
458
459int kernel_execve(const char *path, const char *const argv[], const char *const envp[])
460{
461 /* Avoid the HAE being gratuitously wrong, which would cause us
462 to do the whole turn off interrupts thing and restore it. */
463 struct pt_regs regs = {.hae = alpha_mv.hae_cache};
464 int err = do_execve(path, argv, envp, &regs);
465 if (!err) {
466 struct pt_regs *p = current_pt_regs();
467 /* copy regs to normal position and off to userland we go... */
468 *p = regs;
469 __asm__ __volatile__ (
470 "mov %0, $sp;"
471 "br $31, ret_from_sys_call"
472 : : "r"(p));
473 }
474 return err;
475}
476EXPORT_SYMBOL(kernel_execve);
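
The new C kernel_execve() keeps the hand-off the deleted assembly performed: do_execve() fills the temporary pt_regs (seeded only with the cached HAE), and on success that frame is copied to the task's regular pt_regs slot before $sp is pointed at it and control branches to ret_from_sys_call. A rough picture of the target location, with the layout taken from the deleted "0x4000 - SIZEOF_PT_REGS($8)" arithmetic (sketch only):

        /*
         * Kernel stack of the current task ($8 = current thread_info):
         *
         *      $8 + 0x4000                     <- top of the 16 KB stack
         *      $8 + 0x4000 - SIZEOF_PT_REGS    <- current_pt_regs(); regs copied
         *                                         here, then $sp set to it and
         *                                         ret_from_sys_call restores it
         *                                         into user context
         */
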
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 87835235f114..2ac6b45c3e00 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -111,7 +111,7 @@ sys_call_table:
111 .quad sys_socket 111 .quad sys_socket
112 .quad sys_connect 112 .quad sys_connect
113 .quad sys_accept 113 .quad sys_accept
114 .quad osf_getpriority /* 100 */ 114 .quad sys_osf_getpriority /* 100 */
115 .quad sys_send 115 .quad sys_send
116 .quad sys_recv 116 .quad sys_recv
117 .quad sys_sigreturn 117 .quad sys_sigreturn
@@ -522,6 +522,8 @@ sys_call_table:
522 .quad sys_setns 522 .quad sys_setns
523 .quad sys_accept4 523 .quad sys_accept4
524 .quad sys_sendmmsg 524 .quad sys_sendmmsg
525 .quad sys_process_vm_readv
526 .quad sys_process_vm_writev /* 505 */
525 527
526 .size sys_call_table, . - sys_call_table 528 .size sys_call_table, . - sys_call_table
527 .type sys_call_table, @object 529 .type sys_call_table, @object
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index c0a83ab62b78..59660743237c 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -31,8 +31,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
31 $(ev6-y)memchr.o \ 31 $(ev6-y)memchr.o \
32 $(ev6-y)copy_user.o \ 32 $(ev6-y)copy_user.o \
33 $(ev6-y)clear_user.o \ 33 $(ev6-y)clear_user.o \
34 $(ev6-y)strncpy_from_user.o \
35 $(ev67-y)strlen_user.o \
36 $(ev6-y)csum_ipv6_magic.o \ 34 $(ev6-y)csum_ipv6_magic.o \
37 $(ev6-y)clear_page.o \ 35 $(ev6-y)clear_page.o \
38 $(ev6-y)copy_page.o \ 36 $(ev6-y)copy_page.o \
diff --git a/arch/alpha/lib/ev6-strncpy_from_user.S b/arch/alpha/lib/ev6-strncpy_from_user.S
deleted file mode 100644
index d2e28178cacc..000000000000
--- a/arch/alpha/lib/ev6-strncpy_from_user.S
+++ /dev/null
@@ -1,424 +0,0 @@
1/*
2 * arch/alpha/lib/ev6-strncpy_from_user.S
3 * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
4 *
5 * Just like strncpy except in the return value:
6 *
7 * -EFAULT if an exception occurs before the terminator is copied.
8 * N if the buffer filled.
9 *
10 * Otherwise the length of the string is returned.
11 *
12 * Much of the information about 21264 scheduling/coding comes from:
13 * Compiler Writer's Guide for the Alpha 21264
14 * abbreviated as 'CWG' in other comments here
15 * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
16 * Scheduling notation:
17 * E - either cluster
18 * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
19 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
20 * A bunch of instructions got moved and temp registers were changed
21 * to aid in scheduling. Control flow was also re-arranged to eliminate
22 * branches, and to provide longer code sequences to enable better scheduling.
23 * A total rewrite (using byte load/stores for start & tail sequences)
24 * is desirable, but very difficult to do without a from-scratch rewrite.
25 * Save that for the future.
26 */
27
28
29#include <asm/errno.h>
30#include <asm/regdef.h>
31
32
33/* Allow an exception for an insn; exit if we get one. */
34#define EX(x,y...) \
35 99: x,##y; \
36 .section __ex_table,"a"; \
37 .long 99b - .; \
38 lda $31, $exception-99b($0); \
39 .previous
40
41
42 .set noat
43 .set noreorder
44 .text
45
46 .globl __strncpy_from_user
47 .ent __strncpy_from_user
48 .frame $30, 0, $26
49 .prologue 0
50
51 .align 4
52__strncpy_from_user:
53 and a0, 7, t3 # E : find dest misalignment
54 beq a2, $zerolength # U :
55
56 /* Are source and destination co-aligned? */
57 mov a0, v0 # E : save the string start
58 xor a0, a1, t4 # E :
59 EX( ldq_u t1, 0(a1) ) # L : Latency=3 load first quadword
60 ldq_u t0, 0(a0) # L : load first (partial) aligned dest quadword
61
62 addq a2, t3, a2 # E : bias count by dest misalignment
63 subq a2, 1, a3 # E :
64 addq zero, 1, t10 # E :
65 and t4, 7, t4 # E : misalignment between the two
66
67 and a3, 7, t6 # E : number of tail bytes
68 sll t10, t6, t10 # E : t10 = bitmask of last count byte
69 bne t4, $unaligned # U :
70 lda t2, -1 # E : build a mask against false zero
71
72 /*
73 * We are co-aligned; take care of a partial first word.
74 * On entry to this basic block:
75 * t0 == the first destination word for masking back in
76 * t1 == the first source word.
77 */
78
79 srl a3, 3, a2 # E : a2 = loop counter = (count - 1)/8
80 addq a1, 8, a1 # E :
81 mskqh t2, a1, t2 # U : detection in the src word
82 nop
83
84 /* Create the 1st output word and detect 0's in the 1st input word. */
85 mskqh t1, a1, t3 # U :
86 mskql t0, a1, t0 # U : assemble the first output word
87 ornot t1, t2, t2 # E :
88 nop
89
90 cmpbge zero, t2, t8 # E : bits set iff null found
91 or t0, t3, t0 # E :
92 beq a2, $a_eoc # U :
93 bne t8, $a_eos # U : 2nd branch in a quad. Bad.
94
95 /* On entry to this basic block:
96 * t0 == a source quad not containing a null.
97 * a0 - current aligned destination address
98 * a1 - current aligned source address
99 * a2 - count of quadwords to move.
100 * NOTE: Loop improvement - unrolling this is going to be
101 * a huge win, since we're going to stall otherwise.
102 * Fix this later. For _really_ large copies, look
103 * at using wh64 on a look-ahead basis. See the code
104 * in clear_user.S and copy_user.S.
105 * Presumably, since (a0) and (a1) do not overlap (by C definition)
106 * Lots of nops here:
107 * - Separate loads from stores
108 * - Keep it to 1 branch/quadpack so the branch predictor
109 * can train.
110 */
111$a_loop:
112 stq_u t0, 0(a0) # L :
113 addq a0, 8, a0 # E :
114 nop
115 subq a2, 1, a2 # E :
116
117 EX( ldq_u t0, 0(a1) ) # L :
118 addq a1, 8, a1 # E :
119 cmpbge zero, t0, t8 # E : Stall 2 cycles on t0
120 beq a2, $a_eoc # U :
121
122 beq t8, $a_loop # U :
123 nop
124 nop
125 nop
126
127 /* Take care of the final (partial) word store. At this point
128 * the end-of-count bit is set in t8 iff it applies.
129 *
130 * On entry to this basic block we have:
131 * t0 == the source word containing the null
132 * t8 == the cmpbge mask that found it.
133 */
134$a_eos:
135 negq t8, t12 # E : find low bit set
136 and t8, t12, t12 # E :
137
138 /* We're doing a partial word store and so need to combine
139 our source and original destination words. */
140 ldq_u t1, 0(a0) # L :
141 subq t12, 1, t6 # E :
142
143 or t12, t6, t8 # E :
144 zapnot t0, t8, t0 # U : clear src bytes > null
145 zap t1, t8, t1 # U : clear dst bytes <= null
146 or t0, t1, t0 # E :
147
148 stq_u t0, 0(a0) # L :
149 br $finish_up # L0 :
150 nop
151 nop
152
153 /* Add the end-of-count bit to the eos detection bitmask. */
154 .align 4
155$a_eoc:
156 or t10, t8, t8
157 br $a_eos
158 nop
159 nop
160
161
162/* The source and destination are not co-aligned. Align the destination
163 and cope. We have to be very careful about not reading too much and
164 causing a SEGV. */
165
166 .align 4
167$u_head:
168 /* We know just enough now to be able to assemble the first
169 full source word. We can still find a zero at the end of it
170 that prevents us from outputting the whole thing.
171
172 On entry to this basic block:
173 t0 == the first dest word, unmasked
174 t1 == the shifted low bits of the first source word
175 t6 == bytemask that is -1 in dest word bytes */
176
177 EX( ldq_u t2, 8(a1) ) # L : load second src word
178 addq a1, 8, a1 # E :
179 mskql t0, a0, t0 # U : mask trailing garbage in dst
180 extqh t2, a1, t4 # U :
181
182 or t1, t4, t1 # E : first aligned src word complete
183 mskqh t1, a0, t1 # U : mask leading garbage in src
184 or t0, t1, t0 # E : first output word complete
185 or t0, t6, t6 # E : mask original data for zero test
186
187 cmpbge zero, t6, t8 # E :
188 beq a2, $u_eocfin # U :
189 bne t8, $u_final # U : bad news - 2nd branch in a quad
190 lda t6, -1 # E : mask out the bits we have
191
192 mskql t6, a1, t6 # U : already seen
193 stq_u t0, 0(a0) # L : store first output word
194 or t6, t2, t2 # E :
195 cmpbge zero, t2, t8 # E : find nulls in second partial
196
197 addq a0, 8, a0 # E :
198 subq a2, 1, a2 # E :
199 bne t8, $u_late_head_exit # U :
200 nop
201
202 /* Finally, we've got all the stupid leading edge cases taken care
203 of and we can set up to enter the main loop. */
204
205 extql t2, a1, t1 # U : position hi-bits of lo word
206 EX( ldq_u t2, 8(a1) ) # L : read next high-order source word
207 addq a1, 8, a1 # E :
208 cmpbge zero, t2, t8 # E :
209
210 beq a2, $u_eoc # U :
211 bne t8, $u_eos # U :
212 nop
213 nop
214
215 /* Unaligned copy main loop. In order to avoid reading too much,
216 the loop is structured to detect zeros in aligned source words.
217 This has, unfortunately, effectively pulled half of a loop
218 iteration out into the head and half into the tail, but it does
219 prevent nastiness from accumulating in the very thing we want
220 to run as fast as possible.
221
222 On entry to this basic block:
223 t1 == the shifted high-order bits from the previous source word
224 t2 == the unshifted current source word
225
226 We further know that t2 does not contain a null terminator. */
227
228 /*
229 * Extra nops here:
230 * separate load quads from store quads
231 * only one branch/quad to permit predictor training
232 */
233
234 .align 4
235$u_loop:
236 extqh t2, a1, t0 # U : extract high bits for current word
237 addq a1, 8, a1 # E :
238 extql t2, a1, t3 # U : extract low bits for next time
239 addq a0, 8, a0 # E :
240
241 or t0, t1, t0 # E : current dst word now complete
242 EX( ldq_u t2, 0(a1) ) # L : load high word for next time
243 subq a2, 1, a2 # E :
244 nop
245
246 stq_u t0, -8(a0) # L : save the current word
247 mov t3, t1 # E :
248 cmpbge zero, t2, t8 # E : test new word for eos
249 beq a2, $u_eoc # U :
250
251 beq t8, $u_loop # U :
252 nop
253 nop
254 nop
255
256 /* We've found a zero somewhere in the source word we just read.
257 If it resides in the lower half, we have one (probably partial)
258 word to write out, and if it resides in the upper half, we
259 have one full and one partial word left to write out.
260
261 On entry to this basic block:
262 t1 == the shifted high-order bits from the previous source word
263 t2 == the unshifted current source word. */
264 .align 4
265$u_eos:
266 extqh t2, a1, t0 # U :
267 or t0, t1, t0 # E : first (partial) source word complete
268 cmpbge zero, t0, t8 # E : is the null in this first bit?
269 nop
270
271 bne t8, $u_final # U :
272 stq_u t0, 0(a0) # L : the null was in the high-order bits
273 addq a0, 8, a0 # E :
274 subq a2, 1, a2 # E :
275
276 .align 4
277$u_late_head_exit:
278 extql t2, a1, t0 # U :
279 cmpbge zero, t0, t8 # E :
280 or t8, t10, t6 # E :
281 cmoveq a2, t6, t8 # E :
282
283 /* Take care of a final (probably partial) result word.
284 On entry to this basic block:
285 t0 == assembled source word
286 t8 == cmpbge mask that found the null. */
287 .align 4
288$u_final:
289 negq t8, t6 # E : isolate low bit set
290 and t6, t8, t12 # E :
291 ldq_u t1, 0(a0) # L :
292 subq t12, 1, t6 # E :
293
294 or t6, t12, t8 # E :
295 zapnot t0, t8, t0 # U : kill source bytes > null
296 zap t1, t8, t1 # U : kill dest bytes <= null
297 or t0, t1, t0 # E :
298
299 stq_u t0, 0(a0) # E :
300 br $finish_up # U :
301 nop
302 nop
303
304 .align 4
305$u_eoc: # end-of-count
306 extqh t2, a1, t0 # U :
307 or t0, t1, t0 # E :
308 cmpbge zero, t0, t8 # E :
309 nop
310
311 .align 4
312$u_eocfin: # end-of-count, final word
313 or t10, t8, t8 # E :
314 br $u_final # U :
315 nop
316 nop
317
318 /* Unaligned copy entry point. */
319 .align 4
320$unaligned:
321
322 srl a3, 3, a2 # U : a2 = loop counter = (count - 1)/8
323 and a0, 7, t4 # E : find dest misalignment
324 and a1, 7, t5 # E : find src misalignment
325 mov zero, t0 # E :
326
327 /* Conditionally load the first destination word and a bytemask
328 with 0xff indicating that the destination byte is sacrosanct. */
329
330 mov zero, t6 # E :
331 beq t4, 1f # U :
332 ldq_u t0, 0(a0) # L :
333 lda t6, -1 # E :
334
335 mskql t6, a0, t6 # E :
336 nop
337 nop
338 nop
339
340 .align 4
3411:
342 subq a1, t4, a1 # E : sub dest misalignment from src addr
343 /* If source misalignment is larger than dest misalignment, we need
344 extra startup checks to avoid SEGV. */
345 cmplt t4, t5, t12 # E :
346 extql t1, a1, t1 # U : shift src into place
347 lda t2, -1 # E : for creating masks later
348
349 beq t12, $u_head # U :
350 mskqh t2, t5, t2 # U : begin src byte validity mask
351 cmpbge zero, t1, t8 # E : is there a zero?
352 nop
353
354 extql t2, a1, t2 # U :
355 or t8, t10, t5 # E : test for end-of-count too
356 cmpbge zero, t2, t3 # E :
357 cmoveq a2, t5, t8 # E : Latency=2, extra map slot
358
359 nop # E : goes with cmov
360 andnot t8, t3, t8 # E :
361 beq t8, $u_head # U :
362 nop
363
364 /* At this point we've found a zero in the first partial word of
365 the source. We need to isolate the valid source data and mask
366 it into the original destination data. (Incidentally, we know
367 that we'll need at least one byte of that original dest word.) */
368
369 ldq_u t0, 0(a0) # L :
370 negq t8, t6 # E : build bitmask of bytes <= zero
371 mskqh t1, t4, t1 # U :
372 and t6, t8, t12 # E :
373
374 subq t12, 1, t6 # E :
375 or t6, t12, t8 # E :
376 zapnot t2, t8, t2 # U : prepare source word; mirror changes
377 zapnot t1, t8, t1 # U : to source validity mask
378
379 andnot t0, t2, t0 # E : zero place for source to reside
380 or t0, t1, t0 # E : and put it there
381 stq_u t0, 0(a0) # L :
382 nop
383
384 .align 4
385$finish_up:
386 zapnot t0, t12, t4 # U : was last byte written null?
387 and t12, 0xf0, t3 # E : binary search for the address of the
388 cmovne t4, 1, t4 # E : Latency=2, extra map slot
389 nop # E : with cmovne
390
391 and t12, 0xcc, t2 # E : last byte written
392 and t12, 0xaa, t1 # E :
393 cmovne t3, 4, t3 # E : Latency=2, extra map slot
394 nop # E : with cmovne
395
396 bic a0, 7, t0
397 cmovne t2, 2, t2 # E : Latency=2, extra map slot
398 nop # E : with cmovne
399 nop
400
401 cmovne t1, 1, t1 # E : Latency=2, extra map slot
402 nop # E : with cmovne
403 addq t0, t3, t0 # E :
404 addq t1, t2, t1 # E :
405
406 addq t0, t1, t0 # E :
407 addq t0, t4, t0 # add one if we filled the buffer
408 subq t0, v0, v0 # find string length
409 ret # L0 :
410
411 .align 4
412$zerolength:
413 nop
414 nop
415 nop
416 clr v0
417
418$exception:
419 nop
420 nop
421 nop
422 ret
423
424 .end __strncpy_from_user
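
Both the deleted user-string helpers and their generic replacements hinge on Alpha's cmpbge instruction: "cmpbge zero, word, mask" sets bit i of mask when byte i of word is zero, so a non-zero mask means the quadword holds the terminator. A portable stand-in, for illustration only:

        #include <stdint.h>

        /* Illustrative equivalent of "cmpbge zero, w": bit i of the result is
         * set when byte i (little-endian) of w is zero. */
        static inline unsigned int zero_byte_mask(uint64_t w)
        {
                unsigned int mask = 0;
                int i;

                for (i = 0; i < 8; i++)
                        if (((w >> (8 * i)) & 0xff) == 0)
                                mask |= 1u << i;
                return mask;
        }
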
diff --git a/arch/alpha/lib/ev67-strlen_user.S b/arch/alpha/lib/ev67-strlen_user.S
deleted file mode 100644
index 57e0d77b81a6..000000000000
--- a/arch/alpha/lib/ev67-strlen_user.S
+++ /dev/null
@@ -1,107 +0,0 @@
1/*
2 * arch/alpha/lib/ev67-strlen_user.S
3 * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com>
4 *
5 * Return the length of the string including the NULL terminator
6 * (strlen+1) or zero if an error occurred.
7 *
8 * In places where it is critical to limit the processing time,
9 * and the data is not trusted, strnlen_user() should be used.
10 * It will return a value greater than its second argument if
11 * that limit would be exceeded. This implementation is allowed
12 * to access memory beyond the limit, but will not cross a page
13 * boundary when doing so.
14 *
15 * Much of the information about 21264 scheduling/coding comes from:
16 * Compiler Writer's Guide for the Alpha 21264
17 * abbreviated as 'CWG' in other comments here
18 * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
19 * Scheduling notation:
20 * E - either cluster
21 * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
22 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
23 * Try not to change the actual algorithm if possible for consistency.
24 */
25
26#include <asm/regdef.h>
27
28
29/* Allow an exception for an insn; exit if we get one. */
30#define EX(x,y...) \
31 99: x,##y; \
32 .section __ex_table,"a"; \
33 .long 99b - .; \
34 lda v0, $exception-99b(zero); \
35 .previous
36
37
38 .set noreorder
39 .set noat
40 .text
41
42 .globl __strlen_user
43 .ent __strlen_user
44 .frame sp, 0, ra
45
46 .align 4
47__strlen_user:
48 ldah a1, 32767(zero) # do not use plain strlen_user() for strings
49 # that might be almost 2 GB long; you should
50 # be using strnlen_user() instead
51 nop
52 nop
53 nop
54
55 .globl __strnlen_user
56
57 .align 4
58__strnlen_user:
59 .prologue 0
60 EX( ldq_u t0, 0(a0) ) # L : load first quadword (a0 may be misaligned)
61 lda t1, -1(zero) # E :
62
63 insqh t1, a0, t1 # U :
64 andnot a0, 7, v0 # E :
65 or t1, t0, t0 # E :
66 subq a0, 1, a0 # E : get our +1 for the return
67
68 cmpbge zero, t0, t1 # E : t1 <- bitmask: bit i == 1 <==> i-th byte == 0
69 subq a1, 7, t2 # E :
70 subq a0, v0, t0 # E :
71 bne t1, $found # U :
72
73 addq t2, t0, t2 # E :
74 addq a1, 1, a1 # E :
75 nop # E :
76 nop # E :
77
78 .align 4
79$loop: ble t2, $limit # U :
80 EX( ldq t0, 8(v0) ) # L :
81 nop # E :
82 nop # E :
83
84 cmpbge zero, t0, t1 # E :
85 subq t2, 8, t2 # E :
86 addq v0, 8, v0 # E : addr += 8
87 beq t1, $loop # U :
88
89$found: cttz t1, t2 # U0 :
90 addq v0, t2, v0 # E :
91 subq v0, a0, v0 # E :
92 ret # L0 :
93
94$exception:
95 nop
96 nop
97 nop
98 ret
99
100 .align 4 # currently redundant
101$limit:
102 nop
103 nop
104 subq a1, t2, v0
105 ret
106
107 .end __strlen_user
diff --git a/arch/alpha/lib/strlen_user.S b/arch/alpha/lib/strlen_user.S
deleted file mode 100644
index 508a18e96479..000000000000
--- a/arch/alpha/lib/strlen_user.S
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * arch/alpha/lib/strlen_user.S
3 *
4 * Return the length of the string including the NUL terminator
5 * (strlen+1) or zero if an error occurred.
6 *
7 * In places where it is critical to limit the processing time,
8 * and the data is not trusted, strnlen_user() should be used.
9 * It will return a value greater than its second argument if
10 * that limit would be exceeded. This implementation is allowed
11 * to access memory beyond the limit, but will not cross a page
12 * boundary when doing so.
13 */
14
15#include <asm/regdef.h>
16
17
18/* Allow an exception for an insn; exit if we get one. */
19#define EX(x,y...) \
20 99: x,##y; \
21 .section __ex_table,"a"; \
22 .long 99b - .; \
23 lda v0, $exception-99b(zero); \
24 .previous
25
26
27 .set noreorder
28 .set noat
29 .text
30
31 .globl __strlen_user
32 .ent __strlen_user
33 .frame sp, 0, ra
34
35 .align 3
36__strlen_user:
37 ldah a1, 32767(zero) # do not use plain strlen_user() for strings
38 # that might be almost 2 GB long; you should
39 # be using strnlen_user() instead
40
41 .globl __strnlen_user
42
43 .align 3
44__strnlen_user:
45 .prologue 0
46
47 EX( ldq_u t0, 0(a0) ) # load first quadword (a0 may be misaligned)
48 lda t1, -1(zero)
49 insqh t1, a0, t1
50 andnot a0, 7, v0
51 or t1, t0, t0
52 subq a0, 1, a0 # get our +1 for the return
53 cmpbge zero, t0, t1 # t1 <- bitmask: bit i == 1 <==> i-th byte == 0
54 subq a1, 7, t2
55 subq a0, v0, t0
56 bne t1, $found
57
58 addq t2, t0, t2
59 addq a1, 1, a1
60
61 .align 3
62$loop: ble t2, $limit
63 EX( ldq t0, 8(v0) )
64 subq t2, 8, t2
65 addq v0, 8, v0 # addr += 8
66 cmpbge zero, t0, t1
67 beq t1, $loop
68
69$found: negq t1, t2 # clear all but least set bit
70 and t1, t2, t1
71
72 and t1, 0xf0, t2 # binary search for that set bit
73 and t1, 0xcc, t3
74 and t1, 0xaa, t4
75 cmovne t2, 4, t2
76 cmovne t3, 2, t3
77 cmovne t4, 1, t4
78 addq t2, t3, t2
79 addq v0, t4, v0
80 addq v0, t2, v0
81 nop # dual issue next two on ev4 and ev5
82 subq v0, a0, v0
83$exception:
84 ret
85
86 .align 3 # currently redundant
87$limit:
88 subq a1, t2, v0
89 ret
90
91 .end __strlen_user
diff --git a/arch/alpha/lib/strncpy_from_user.S b/arch/alpha/lib/strncpy_from_user.S
deleted file mode 100644
index 73ee21160ff7..000000000000
--- a/arch/alpha/lib/strncpy_from_user.S
+++ /dev/null
@@ -1,339 +0,0 @@
1/*
2 * arch/alpha/lib/strncpy_from_user.S
3 * Contributed by Richard Henderson (rth@tamu.edu)
4 *
5 * Just like strncpy except in the return value:
6 *
7 * -EFAULT if an exception occurs before the terminator is copied.
8 * N if the buffer filled.
9 *
10 * Otherwise the length of the string is returned.
11 */
12
13
14#include <asm/errno.h>
15#include <asm/regdef.h>
16
17
18/* Allow an exception for an insn; exit if we get one. */
19#define EX(x,y...) \
20 99: x,##y; \
21 .section __ex_table,"a"; \
22 .long 99b - .; \
23 lda $31, $exception-99b($0); \
24 .previous
25
26
27 .set noat
28 .set noreorder
29 .text
30
31 .globl __strncpy_from_user
32 .ent __strncpy_from_user
33 .frame $30, 0, $26
34 .prologue 0
35
36 .align 3
37$aligned:
38 /* On entry to this basic block:
39 t0 == the first destination word for masking back in
40 t1 == the first source word. */
41
42 /* Create the 1st output word and detect 0's in the 1st input word. */
43 lda t2, -1 # e1 : build a mask against false zero
44 mskqh t2, a1, t2 # e0 : detection in the src word
45 mskqh t1, a1, t3 # e0 :
46 ornot t1, t2, t2 # .. e1 :
47 mskql t0, a1, t0 # e0 : assemble the first output word
48 cmpbge zero, t2, t8 # .. e1 : bits set iff null found
49 or t0, t3, t0 # e0 :
50 beq a2, $a_eoc # .. e1 :
51 bne t8, $a_eos # .. e1 :
52
53 /* On entry to this basic block:
54 t0 == a source word not containing a null. */
55
56$a_loop:
57 stq_u t0, 0(a0) # e0 :
58 addq a0, 8, a0 # .. e1 :
59 EX( ldq_u t0, 0(a1) ) # e0 :
60 addq a1, 8, a1 # .. e1 :
61 subq a2, 1, a2 # e0 :
62 cmpbge zero, t0, t8 # .. e1 (stall)
63 beq a2, $a_eoc # e1 :
64 beq t8, $a_loop # e1 :
65
66 /* Take care of the final (partial) word store. At this point
67 the end-of-count bit is set in t8 iff it applies.
68
69 On entry to this basic block we have:
70 t0 == the source word containing the null
71 t8 == the cmpbge mask that found it. */
72
73$a_eos:
74 negq t8, t12 # e0 : find low bit set
75 and t8, t12, t12 # e1 (stall)
76
77 /* For the sake of the cache, don't read a destination word
78 if we're not going to need it. */
79 and t12, 0x80, t6 # e0 :
80 bne t6, 1f # .. e1 (zdb)
81
82 /* We're doing a partial word store and so need to combine
83 our source and original destination words. */
84 ldq_u t1, 0(a0) # e0 :
85 subq t12, 1, t6 # .. e1 :
86 or t12, t6, t8 # e0 :
87 unop #
88 zapnot t0, t8, t0 # e0 : clear src bytes > null
89 zap t1, t8, t1 # .. e1 : clear dst bytes <= null
90 or t0, t1, t0 # e1 :
91
921: stq_u t0, 0(a0)
93 br $finish_up
94
95 /* Add the end-of-count bit to the eos detection bitmask. */
96$a_eoc:
97 or t10, t8, t8
98 br $a_eos
99
100 /*** The Function Entry Point ***/
101 .align 3
102__strncpy_from_user:
103 mov a0, v0 # save the string start
104 beq a2, $zerolength
105
106 /* Are source and destination co-aligned? */
107 xor a0, a1, t1 # e0 :
108 and a0, 7, t0 # .. e1 : find dest misalignment
109 and t1, 7, t1 # e0 :
110 addq a2, t0, a2 # .. e1 : bias count by dest misalignment
111 subq a2, 1, a2 # e0 :
112 and a2, 7, t2 # e1 :
113 srl a2, 3, a2 # e0 : a2 = loop counter = (count - 1)/8
114 addq zero, 1, t10 # .. e1 :
115 sll t10, t2, t10 # e0 : t10 = bitmask of last count byte
116 bne t1, $unaligned # .. e1 :
117
118 /* We are co-aligned; take care of a partial first word. */
119
120 EX( ldq_u t1, 0(a1) ) # e0 : load first src word
121 addq a1, 8, a1 # .. e1 :
122
123 beq t0, $aligned # avoid loading dest word if not needed
124 ldq_u t0, 0(a0) # e0 :
125 br $aligned # .. e1 :
126
127
128/* The source and destination are not co-aligned. Align the destination
129 and cope. We have to be very careful about not reading too much and
130 causing a SEGV. */
131
132 .align 3
133$u_head:
134 /* We know just enough now to be able to assemble the first
135 full source word. We can still find a zero at the end of it
136 that prevents us from outputting the whole thing.
137
138 On entry to this basic block:
139 t0 == the first dest word, unmasked
140 t1 == the shifted low bits of the first source word
141 t6 == bytemask that is -1 in dest word bytes */
142
143 EX( ldq_u t2, 8(a1) ) # e0 : load second src word
144 addq a1, 8, a1 # .. e1 :
145 mskql t0, a0, t0 # e0 : mask trailing garbage in dst
146 extqh t2, a1, t4 # e0 :
147 or t1, t4, t1 # e1 : first aligned src word complete
148 mskqh t1, a0, t1 # e0 : mask leading garbage in src
149 or t0, t1, t0 # e0 : first output word complete
150 or t0, t6, t6 # e1 : mask original data for zero test
151 cmpbge zero, t6, t8 # e0 :
152 beq a2, $u_eocfin # .. e1 :
153 bne t8, $u_final # e1 :
154
155 lda t6, -1 # e1 : mask out the bits we have
156 mskql t6, a1, t6 # e0 : already seen
157 stq_u t0, 0(a0) # e0 : store first output word
158 or t6, t2, t2 # .. e1 :
159 cmpbge zero, t2, t8 # e0 : find nulls in second partial
160 addq a0, 8, a0 # .. e1 :
161 subq a2, 1, a2 # e0 :
162 bne t8, $u_late_head_exit # .. e1 :
163
164 /* Finally, we've got all the stupid leading edge cases taken care
165 of and we can set up to enter the main loop. */
166
167 extql t2, a1, t1 # e0 : position hi-bits of lo word
168 EX( ldq_u t2, 8(a1) ) # .. e1 : read next high-order source word
169 addq a1, 8, a1 # e0 :
170 cmpbge zero, t2, t8 # e1 (stall)
171 beq a2, $u_eoc # e1 :
172 bne t8, $u_eos # e1 :
173
174 /* Unaligned copy main loop. In order to avoid reading too much,
175 the loop is structured to detect zeros in aligned source words.
176 This has, unfortunately, effectively pulled half of a loop
177 iteration out into the head and half into the tail, but it does
178 prevent nastiness from accumulating in the very thing we want
179 to run as fast as possible.
180
181 On entry to this basic block:
182 t1 == the shifted high-order bits from the previous source word
183 t2 == the unshifted current source word
184
185 We further know that t2 does not contain a null terminator. */
186
187 .align 3
188$u_loop:
189 extqh t2, a1, t0 # e0 : extract high bits for current word
190 addq a1, 8, a1 # .. e1 :
191 extql t2, a1, t3 # e0 : extract low bits for next time
192 addq a0, 8, a0 # .. e1 :
193 or t0, t1, t0 # e0 : current dst word now complete
194 EX( ldq_u t2, 0(a1) ) # .. e1 : load high word for next time
195 stq_u t0, -8(a0) # e0 : save the current word
196 mov t3, t1 # .. e1 :
197 subq a2, 1, a2 # e0 :
198 cmpbge zero, t2, t8 # .. e1 : test new word for eos
199 beq a2, $u_eoc # e1 :
200 beq t8, $u_loop # e1 :
201
202 /* We've found a zero somewhere in the source word we just read.
203 If it resides in the lower half, we have one (probably partial)
204 word to write out, and if it resides in the upper half, we
205 have one full and one partial word left to write out.
206
207 On entry to this basic block:
208 t1 == the shifted high-order bits from the previous source word
209 t2 == the unshifted current source word. */
210$u_eos:
211 extqh t2, a1, t0 # e0 :
212 or t0, t1, t0 # e1 : first (partial) source word complete
213
214 cmpbge zero, t0, t8 # e0 : is the null in this first bit?
215 bne t8, $u_final # .. e1 (zdb)
216
217 stq_u t0, 0(a0) # e0 : the null was in the high-order bits
218 addq a0, 8, a0 # .. e1 :
219 subq a2, 1, a2 # e1 :
220
221$u_late_head_exit:
222 extql t2, a1, t0 # .. e0 :
223 cmpbge zero, t0, t8 # e0 :
224 or t8, t10, t6 # e1 :
225 cmoveq a2, t6, t8 # e0 :
226 nop # .. e1 :
227
228 /* Take care of a final (probably partial) result word.
229 On entry to this basic block:
230 t0 == assembled source word
231 t8 == cmpbge mask that found the null. */
232$u_final:
233 negq t8, t6 # e0 : isolate low bit set
234 and t6, t8, t12 # e1 :
235
236 and t12, 0x80, t6 # e0 : avoid dest word load if we can
237 bne t6, 1f # .. e1 (zdb)
238
239 ldq_u t1, 0(a0) # e0 :
240 subq t12, 1, t6 # .. e1 :
241 or t6, t12, t8 # e0 :
242 zapnot t0, t8, t0 # .. e1 : kill source bytes > null
243 zap t1, t8, t1 # e0 : kill dest bytes <= null
244 or t0, t1, t0 # e1 :
245
2461: stq_u t0, 0(a0) # e0 :
247 br $finish_up
248
249$u_eoc: # end-of-count
250 extqh t2, a1, t0
251 or t0, t1, t0
252 cmpbge zero, t0, t8
253
254$u_eocfin: # end-of-count, final word
255 or t10, t8, t8
256 br $u_final
257
258 /* Unaligned copy entry point. */
259 .align 3
260$unaligned:
261
262 EX( ldq_u t1, 0(a1) ) # e0 : load first source word
263
264 and a0, 7, t4 # .. e1 : find dest misalignment
265 and a1, 7, t5 # e0 : find src misalignment
266
267 /* Conditionally load the first destination word and a bytemask
268 with 0xff indicating that the destination byte is sacrosanct. */
269
270 mov zero, t0 # .. e1 :
271 mov zero, t6 # e0 :
272 beq t4, 1f # .. e1 :
273 ldq_u t0, 0(a0) # e0 :
274 lda t6, -1 # .. e1 :
275 mskql t6, a0, t6 # e0 :
2761:
277 subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
278
279 /* If source misalignment is larger than dest misalignment, we need
280 extra startup checks to avoid SEGV. */
281
282 cmplt t4, t5, t12 # e1 :
283 extql t1, a1, t1 # .. e0 : shift src into place
284 lda t2, -1 # e0 : for creating masks later
285 beq t12, $u_head # e1 :
286
287 mskqh t2, t5, t2 # e0 : begin src byte validity mask
288 cmpbge zero, t1, t8 # .. e1 : is there a zero?
289 extql t2, a1, t2 # e0 :
290 or t8, t10, t5 # .. e1 : test for end-of-count too
291 cmpbge zero, t2, t3 # e0 :
292 cmoveq a2, t5, t8 # .. e1 :
293 andnot t8, t3, t8 # e0 :
294 beq t8, $u_head # .. e1 (zdb)
295
296 /* At this point we've found a zero in the first partial word of
297 the source. We need to isolate the valid source data and mask
298 it into the original destination data. (Incidentally, we know
299 that we'll need at least one byte of that original dest word.) */
300
301 ldq_u t0, 0(a0) # e0 :
302 negq t8, t6 # .. e1 : build bitmask of bytes <= zero
303 mskqh t1, t4, t1 # e0 :
304 and t6, t8, t12 # .. e1 :
305 subq t12, 1, t6 # e0 :
306 or t6, t12, t8 # e1 :
307
308 zapnot t2, t8, t2 # e0 : prepare source word; mirror changes
309 zapnot t1, t8, t1 # .. e1 : to source validity mask
310
311 andnot t0, t2, t0 # e0 : zero place for source to reside
312 or t0, t1, t0 # e1 : and put it there
313 stq_u t0, 0(a0) # e0 :
314
315$finish_up:
316 zapnot t0, t12, t4 # was last byte written null?
317 cmovne t4, 1, t4
318
319 and t12, 0xf0, t3 # binary search for the address of the
320 and t12, 0xcc, t2 # last byte written
321 and t12, 0xaa, t1
322 bic a0, 7, t0
323 cmovne t3, 4, t3
324 cmovne t2, 2, t2
325 cmovne t1, 1, t1
326 addq t0, t3, t0
327 addq t1, t2, t1
328 addq t0, t1, t0
329 addq t0, t4, t0 # add one if we filled the buffer
330
331 subq t0, v0, v0 # find string length
332 ret
333
334$zerolength:
335 clr v0
336$exception:
337 ret
338
339 .end __strncpy_from_user
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 5eecab1a84ef..0c4132dd3507 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -89,6 +89,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
89 const struct exception_table_entry *fixup; 89 const struct exception_table_entry *fixup;
90 int fault, si_code = SEGV_MAPERR; 90 int fault, si_code = SEGV_MAPERR;
91 siginfo_t info; 91 siginfo_t info;
92 unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
93 (cause > 0 ? FAULT_FLAG_WRITE : 0));
92 94
93 /* As of EV6, a load into $31/$f31 is a prefetch, and never faults 95 /* As of EV6, a load into $31/$f31 is a prefetch, and never faults
94 (or is suppressed by the PALcode). Support that for older CPUs 96 (or is suppressed by the PALcode). Support that for older CPUs
@@ -114,6 +116,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
114 goto vmalloc_fault; 116 goto vmalloc_fault;
115#endif 117#endif
116 118
119retry:
117 down_read(&mm->mmap_sem); 120 down_read(&mm->mmap_sem);
118 vma = find_vma(mm, address); 121 vma = find_vma(mm, address);
119 if (!vma) 122 if (!vma)
@@ -144,8 +147,11 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
144 /* If for any reason at all we couldn't handle the fault, 147 /* If for any reason at all we couldn't handle the fault,
145 make sure we exit gracefully rather than endlessly redo 148 make sure we exit gracefully rather than endlessly redo
146 the fault. */ 149 the fault. */
147 fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0); 150 fault = handle_mm_fault(mm, vma, address, flags);
148 up_read(&mm->mmap_sem); 151
152 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
153 return;
154
149 if (unlikely(fault & VM_FAULT_ERROR)) { 155 if (unlikely(fault & VM_FAULT_ERROR)) {
150 if (fault & VM_FAULT_OOM) 156 if (fault & VM_FAULT_OOM)
151 goto out_of_memory; 157 goto out_of_memory;
@@ -153,10 +159,26 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
153 goto do_sigbus; 159 goto do_sigbus;
154 BUG(); 160 BUG();
155 } 161 }
156 if (fault & VM_FAULT_MAJOR) 162
157 current->maj_flt++; 163 if (flags & FAULT_FLAG_ALLOW_RETRY) {
158 else 164 if (fault & VM_FAULT_MAJOR)
159 current->min_flt++; 165 current->maj_flt++;
166 else
167 current->min_flt++;
168 if (fault & VM_FAULT_RETRY) {
169 flags &= ~FAULT_FLAG_ALLOW_RETRY;
170
171 /* No need to up_read(&mm->mmap_sem) as we would
172 * have already released it in __lock_page_or_retry
173 * in mm/filemap.c.
174 */
175
176 goto retry;
177 }
178 }
179
180 up_read(&mm->mmap_sem);
181
160 return; 182 return;
161 183
162 /* Something tried to access memory that isn't in our memory map. 184 /* Something tried to access memory that isn't in our memory map.
@@ -186,12 +208,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
186 /* We ran out of memory, or some other thing happened to us that 208 /* We ran out of memory, or some other thing happened to us that
187 made us unable to handle the page fault gracefully. */ 209 made us unable to handle the page fault gracefully. */
188 out_of_memory: 210 out_of_memory:
211 up_read(&mm->mmap_sem);
189 if (!user_mode(regs)) 212 if (!user_mode(regs))
190 goto no_context; 213 goto no_context;
191 pagefault_out_of_memory(); 214 pagefault_out_of_memory();
192 return; 215 return;
193 216
194 do_sigbus: 217 do_sigbus:
218 up_read(&mm->mmap_sem);
195 /* Send a sigbus, regardless of whether we were in kernel 219 /* Send a sigbus, regardless of whether we were in kernel
196 or user mode. */ 220 or user mode. */
197 info.si_signo = SIGBUS; 221 info.si_signo = SIGBUS;
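
The fault handler now follows the common mmap_sem retry protocol: the first attempt runs with FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE, and when handle_mm_fault() reports VM_FAULT_RETRY the semaphore has already been dropped inside __lock_page_or_retry(), so the handler clears ALLOW_RETRY and repeats exactly once. Condensed to its shape (a sketch, not the full handler):

        flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                (cause > 0 ? FAULT_FLAG_WRITE : 0);
retry:
        down_read(&mm->mmap_sem);
        /* ... find_vma() and access checks ... */
        fault = handle_mm_fault(mm, vma, address, flags);

        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;                                 /* mmap_sem already gone */

        if ((flags & FAULT_FLAG_ALLOW_RETRY) && (fault & VM_FAULT_RETRY)) {
                flags &= ~FAULT_FLAG_ALLOW_RETRY;       /* only one retry */
                goto retry;                             /* lock was released for us */
        }
        up_read(&mm->mmap_sem);
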
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index a0a5d27aa215..b8ce18f485d3 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -12,6 +12,7 @@
12#include <linux/smp.h> 12#include <linux/smp.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <asm/ptrace.h> 14#include <asm/ptrace.h>
15#include <asm/special_insns.h>
15 16
16#include "op_impl.h" 17#include "op_impl.h"
17 18
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 6d6e18fee9fe..c5f9ae5dbd1a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2144,6 +2144,7 @@ source "drivers/cpufreq/Kconfig"
2144config CPU_FREQ_IMX 2144config CPU_FREQ_IMX
2145 tristate "CPUfreq driver for i.MX CPUs" 2145 tristate "CPUfreq driver for i.MX CPUs"
2146 depends on ARCH_MXC && CPU_FREQ 2146 depends on ARCH_MXC && CPU_FREQ
2147 select CPU_FREQ_TABLE
2147 help 2148 help
2148 This enables the CPUfreq driver for i.MX CPUs. 2149 This enables the CPUfreq driver for i.MX CPUs.
2149 2150
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 59509c48d7e5..bd0cff3f808c 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -154,5 +154,10 @@
154 #size-cells = <0>; 154 #size-cells = <0>;
155 ti,hwmods = "i2c3"; 155 ti,hwmods = "i2c3";
156 }; 156 };
157
158 wdt2: wdt@44e35000 {
159 compatible = "ti,omap3-wdt";
160 ti,hwmods = "wd_timer2";
161 };
157 }; 162 };
158}; 163};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index cd86177a3ea2..59d9789e5508 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -25,8 +25,8 @@
25 aips@70000000 { /* aips-1 */ 25 aips@70000000 { /* aips-1 */
26 spba@70000000 { 26 spba@70000000 {
27 esdhc@70004000 { /* ESDHC1 */ 27 esdhc@70004000 { /* ESDHC1 */
28 fsl,cd-internal; 28 fsl,cd-controller;
29 fsl,wp-internal; 29 fsl,wp-controller;
30 status = "okay"; 30 status = "okay";
31 }; 31 };
32 32
diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts
index 52d947045106..f8ca6fa88192 100644
--- a/arch/arm/boot/dts/kirkwood-iconnect.dts
+++ b/arch/arm/boot/dts/kirkwood-iconnect.dts
@@ -41,9 +41,13 @@
41 }; 41 };
42 power-blue { 42 power-blue {
43 label = "power:blue"; 43 label = "power:blue";
44 gpios = <&gpio1 11 0>; 44 gpios = <&gpio1 10 0>;
45 linux,default-trigger = "timer"; 45 linux,default-trigger = "timer";
46 }; 46 };
47 power-red {
48 label = "power:red";
49 gpios = <&gpio1 11 0>;
50 };
47 usb1 { 51 usb1 {
48 label = "usb1:blue"; 52 label = "usb1:blue";
49 gpios = <&gpio1 12 0>; 53 gpios = <&gpio1 12 0>;
diff --git a/arch/arm/boot/dts/twl6030.dtsi b/arch/arm/boot/dts/twl6030.dtsi
index 3b2f3510d7eb..d351b27d7213 100644
--- a/arch/arm/boot/dts/twl6030.dtsi
+++ b/arch/arm/boot/dts/twl6030.dtsi
@@ -66,6 +66,7 @@
66 66
67 vcxio: regulator@8 { 67 vcxio: regulator@8 {
68 compatible = "ti,twl6030-vcxio"; 68 compatible = "ti,twl6030-vcxio";
69 regulator-always-on;
69 }; 70 };
70 71
71 vusb: regulator@9 { 72 vusb: regulator@9 {
@@ -74,10 +75,12 @@
74 75
75 v1v8: regulator@10 { 76 v1v8: regulator@10 {
76 compatible = "ti,twl6030-v1v8"; 77 compatible = "ti,twl6030-v1v8";
78 regulator-always-on;
77 }; 79 };
78 80
79 v2v1: regulator@11 { 81 v2v1: regulator@11 {
80 compatible = "ti,twl6030-v2v1"; 82 compatible = "ti,twl6030-v2v1";
83 regulator-always-on;
81 }; 84 };
82 85
83 clk32kg: regulator@12 { 86 clk32kg: regulator@12 {
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 2d4f661d1cf6..da6845493caa 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -86,6 +86,7 @@ CONFIG_NEW_LEDS=y
86CONFIG_LEDS_CLASS=y 86CONFIG_LEDS_CLASS=y
87CONFIG_LEDS_LM3530=y 87CONFIG_LEDS_LM3530=y
88CONFIG_LEDS_LP5521=y 88CONFIG_LEDS_LP5521=y
89CONFIG_LEDS_GPIO=y
89CONFIG_RTC_CLASS=y 90CONFIG_RTC_CLASS=y
90CONFIG_RTC_DRV_AB8500=y 91CONFIG_RTC_DRV_AB8500=y
91CONFIG_RTC_DRV_PL031=y 92CONFIG_RTC_DRV_PL031=y
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 4db5de54b6a7..6321567d8eaa 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -102,7 +102,8 @@ void __init dove_ehci1_init(void)
102void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data) 102void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
103{ 103{
104 orion_ge00_init(eth_data, DOVE_GE00_PHYS_BASE, 104 orion_ge00_init(eth_data, DOVE_GE00_PHYS_BASE,
105 IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR); 105 IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR,
106 1600);
106} 107}
107 108
108/***************************************************************************** 109/*****************************************************************************
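
The extra value now passed to orion_ge00_init() (1600 here and on Kirkwood, MV643XX_TX_CSUM_DEFAULT_LIMIT on mv78xx0 below) is read here as the per-SoC limit on frame sizes eligible for mv643xx_eth hardware TX checksumming; that reading is an assumption, since the plat-orion helper that actually takes the new parameter is changed elsewhere in this series.

        /* Assumed meaning of the new last argument (not shown in this hunk):
         *      Dove/Kirkwood:  TX csum offload only for frames <= 1600 bytes
         *      mv78xx0:        keeps the mv643xx_eth driver default limit
         */
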
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index 5ca80307d6d7..4e574c24581c 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -42,6 +42,7 @@
42#include <plat/backlight.h> 42#include <plat/backlight.h>
43#include <plat/fb.h> 43#include <plat/fb.h>
44#include <plat/mfc.h> 44#include <plat/mfc.h>
45#include <plat/hdmi.h>
45 46
46#include <mach/ohci.h> 47#include <mach/ohci.h>
47#include <mach/map.h> 48#include <mach/map.h>
@@ -734,6 +735,11 @@ static void __init origen_bt_setup(void)
734 s3c_gpio_setpull(EXYNOS4_GPX2(2), S3C_GPIO_PULL_NONE); 735 s3c_gpio_setpull(EXYNOS4_GPX2(2), S3C_GPIO_PULL_NONE);
735} 736}
736 737
738/* I2C module and id for HDMIPHY */
739static struct i2c_board_info hdmiphy_info = {
740 I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
741};
742
737static void s5p_tv_setup(void) 743static void s5p_tv_setup(void)
738{ 744{
739 /* Direct HPD to HDMI chip */ 745 /* Direct HPD to HDMI chip */
@@ -781,6 +787,7 @@ static void __init origen_machine_init(void)
781 787
782 s5p_tv_setup(); 788 s5p_tv_setup();
783 s5p_i2c_hdmiphy_set_platdata(NULL); 789 s5p_i2c_hdmiphy_set_platdata(NULL);
790 s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);
784 791
785#ifdef CONFIG_DRM_EXYNOS 792#ifdef CONFIG_DRM_EXYNOS
786 s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata; 793 s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata;
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index 3cfa688d274a..73f2bce097e1 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -40,6 +40,7 @@
40#include <plat/mfc.h> 40#include <plat/mfc.h>
41#include <plat/ehci.h> 41#include <plat/ehci.h>
42#include <plat/clock.h> 42#include <plat/clock.h>
43#include <plat/hdmi.h>
43 44
44#include <mach/map.h> 45#include <mach/map.h>
45#include <mach/ohci.h> 46#include <mach/ohci.h>
@@ -354,6 +355,11 @@ static struct platform_pwm_backlight_data smdkv310_bl_data = {
354 .pwm_period_ns = 1000, 355 .pwm_period_ns = 1000,
355}; 356};
356 357
358/* I2C module and id for HDMIPHY */
359static struct i2c_board_info hdmiphy_info = {
360 I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
361};
362
357static void s5p_tv_setup(void) 363static void s5p_tv_setup(void)
358{ 364{
359 /* direct HPD to HDMI chip */ 365 /* direct HPD to HDMI chip */
@@ -388,6 +394,7 @@ static void __init smdkv310_machine_init(void)
388 394
389 s5p_tv_setup(); 395 s5p_tv_setup();
390 s5p_i2c_hdmiphy_set_platdata(NULL); 396 s5p_i2c_hdmiphy_set_platdata(NULL);
397 s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);
391 398
392 samsung_keypad_set_platdata(&smdkv310_keypad_data); 399 samsung_keypad_set_platdata(&smdkv310_keypad_data);
393 400
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 07f7c226e4cf..d004d37ad9d8 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -9,7 +9,8 @@ obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o
9obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o 9obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
10obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o 10obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o
11 11
12obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o 12imx5-pm-$(CONFIG_PM) += pm-imx5.o
13obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o $(imx5-pm-y) cpu_op-mx51.o
13 14
14obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \ 15obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \
15 clk-pfd.o clk-busy.o 16 clk-pfd.o clk-busy.o
@@ -70,14 +71,13 @@ obj-$(CONFIG_DEBUG_LL) += lluart.o
70obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o 71obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
71obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o 72obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
72obj-$(CONFIG_HAVE_IMX_SRC) += src.o 73obj-$(CONFIG_HAVE_IMX_SRC) += src.o
73obj-$(CONFIG_CPU_V7) += head-v7.o 74AFLAGS_headsmp.o :=-Wa,-march=armv7-a
74AFLAGS_head-v7.o :=-Wa,-march=armv7-a 75obj-$(CONFIG_SMP) += headsmp.o platsmp.o
75obj-$(CONFIG_SMP) += platsmp.o
76obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 76obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
77obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o 77obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
78 78
79ifeq ($(CONFIG_PM),y) 79ifeq ($(CONFIG_PM),y)
80obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o 80obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o
81endif 81endif
82 82
83# i.MX5 based machines 83# i.MX5 based machines
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index ea89520b6e22..4233d9e3531d 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -152,7 +152,7 @@ enum mx6q_clks {
152 ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3, 152 ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
153 usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg, 153 usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
154 pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg, 154 pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
155 ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, 155 ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5,
156 clk_max 156 clk_max
157}; 157};
158 158
@@ -288,8 +288,10 @@ int __init mx6q_clocks_init(void)
288 clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3); 288 clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
289 clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3); 289 clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
290 clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3); 290 clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
291 clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_sel", base + 0x20, 10, 1); 291 clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
292 clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_sel", base + 0x20, 11, 1); 292 clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1);
293 clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
294 clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1);
293 clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3); 295 clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
294 clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3); 296 clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
295 clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3); 297 clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3);
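
The two new fixed-factor clocks model the fixed /3.5 stage in front of the LDB display clocks: imx_clk_fixed_factor(..., 2, 7) scales the parent rate by 2/7, i.e. divides by 3.5, and the existing 1-bit podf divider then gives /1 or /2 on top of that. Worked example with a hypothetical parent rate:

        /*
         * Hypothetical rates through the new chain:
         *      ldb_di0_sel      910 MHz                  (selected parent)
         *      ldb_di0_div_3_5  910 * 2 / 7 = 260 MHz    (the fixed /3.5)
         *      ldb_di0_podf     260 MHz or 130 MHz       (1-bit divider)
         */
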
diff --git a/arch/arm/mach-imx/head-v7.S b/arch/arm/mach-imx/headsmp.S
index 7e49deb128a4..7e49deb128a4 100644
--- a/arch/arm/mach-imx/head-v7.S
+++ b/arch/arm/mach-imx/headsmp.S
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 20ed2d56c1af..f8f7437c83b8 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void)
42 : "cc"); 42 : "cc");
43} 43}
44 44
45static inline void cpu_leave_lowpower(void)
46{
47 unsigned int v;
48
49 asm volatile(
50 "mrc p15, 0, %0, c1, c0, 0\n"
51 " orr %0, %0, %1\n"
52 " mcr p15, 0, %0, c1, c0, 0\n"
53 " mrc p15, 0, %0, c1, c0, 1\n"
54 " orr %0, %0, %2\n"
55 " mcr p15, 0, %0, c1, c0, 1\n"
56 : "=&r" (v)
57 : "Ir" (CR_C), "Ir" (0x40)
58 : "cc");
59}
60
61/* 45/*
62 * platform-specific code to shutdown a CPU 46 * platform-specific code to shutdown a CPU
63 * 47 *
@@ -67,11 +51,10 @@ void platform_cpu_die(unsigned int cpu)
67{ 51{
68 cpu_enter_lowpower(); 52 cpu_enter_lowpower();
69 imx_enable_cpu(cpu, false); 53 imx_enable_cpu(cpu, false);
70 cpu_do_idle();
71 cpu_leave_lowpower();
72 54
73 /* We should never return from idle */ 55 /* spin here until hardware takes it down */
74 panic("cpu %d unexpectedly exit from shutdown\n", cpu); 56 while (1)
57 ;
75} 58}
76 59
77int platform_cpu_disable(unsigned int cpu) 60int platform_cpu_disable(unsigned int cpu)
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 5ec0608f2a76..045b3f6a387d 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -71,7 +71,7 @@ soft:
71/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */ 71/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
72static int ksz9021rn_phy_fixup(struct phy_device *phydev) 72static int ksz9021rn_phy_fixup(struct phy_device *phydev)
73{ 73{
74 if (IS_ENABLED(CONFIG_PHYLIB)) { 74 if (IS_BUILTIN(CONFIG_PHYLIB)) {
75 /* min rx data delay */ 75 /* min rx data delay */
76 phy_write(phydev, 0x0b, 0x8105); 76 phy_write(phydev, 0x0b, 0x8105);
77 phy_write(phydev, 0x0c, 0x0000); 77 phy_write(phydev, 0x0c, 0x0000);
@@ -112,7 +112,7 @@ put_clk:
112 112
113static void __init imx6q_sabrelite_init(void) 113static void __init imx6q_sabrelite_init(void)
114{ 114{
115 if (IS_ENABLED(CONFIG_PHYLIB)) 115 if (IS_BUILTIN(CONFIG_PHYLIB))
116 phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK, 116 phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
117 ksz9021rn_phy_fixup); 117 ksz9021rn_phy_fixup);
118 imx6q_sabrelite_cko1_setup(); 118 imx6q_sabrelite_cko1_setup();
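
The switch from IS_ENABLED() to IS_BUILTIN() matters only when phylib is modular: this board file is always built in, so it must not reference phy_write()/phy_register_fixup_for_uid() unless phylib is built into the kernel image as well. The two kconfig.h helpers differ in exactly that case:

        /*
         *      CONFIG_PHYLIB=y    ->  IS_ENABLED() == 1, IS_BUILTIN() == 1
         *      CONFIG_PHYLIB=m    ->  IS_ENABLED() == 1, IS_BUILTIN() == 0
         *      CONFIG_PHYLIB off  ->  IS_ENABLED() == 0, IS_BUILTIN() == 0
         */
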
diff --git a/arch/arm/mach-kirkwood/Makefile.boot b/arch/arm/mach-kirkwood/Makefile.boot
index a5717558ee89..a13299d758e1 100644
--- a/arch/arm/mach-kirkwood/Makefile.boot
+++ b/arch/arm/mach-kirkwood/Makefile.boot
@@ -7,7 +7,8 @@ dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns320.dtb
7dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns325.dtb 7dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns325.dtb
8dtb-$(CONFIG_MACH_ICONNECT_DT) += kirkwood-iconnect.dtb 8dtb-$(CONFIG_MACH_ICONNECT_DT) += kirkwood-iconnect.dtb
9dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb 9dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb
10dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-qnap-ts219.dtb 10dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6281.dtb
11dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6282.dtb
11dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb 12dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb
12dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb 13dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb
13dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb 14dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index c4b64adcbfce..3226077735b1 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -301,7 +301,7 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
301{ 301{
302 orion_ge00_init(eth_data, 302 orion_ge00_init(eth_data,
303 GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM, 303 GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
304 IRQ_KIRKWOOD_GE00_ERR); 304 IRQ_KIRKWOOD_GE00_ERR, 1600);
305 /* The interface forgets the MAC address assigned by u-boot if 305 /* The interface forgets the MAC address assigned by u-boot if
306 the clock is turned off, so claim the clk now. */ 306 the clock is turned off, so claim the clk now. */
307 clk_prepare_enable(ge0); 307 clk_prepare_enable(ge0);
@@ -315,7 +315,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
315{ 315{
316 orion_ge01_init(eth_data, 316 orion_ge01_init(eth_data,
317 GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM, 317 GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
318 IRQ_KIRKWOOD_GE01_ERR); 318 IRQ_KIRKWOOD_GE01_ERR, 1600);
319 clk_prepare_enable(ge1); 319 clk_prepare_enable(ge1);
320} 320}
321 321
diff --git a/arch/arm/mach-mmp/sram.c b/arch/arm/mach-mmp/sram.c
index 4304f9519372..7e8a5a2e1ec7 100644
--- a/arch/arm/mach-mmp/sram.c
+++ b/arch/arm/mach-mmp/sram.c
@@ -68,7 +68,7 @@ static int __devinit sram_probe(struct platform_device *pdev)
68 struct resource *res; 68 struct resource *res;
69 int ret = 0; 69 int ret = 0;
70 70
71 if (!pdata && !pdata->pool_name) 71 if (!pdata || !pdata->pool_name)
72 return -ENODEV; 72 return -ENODEV;
73 73
74 info = kzalloc(sizeof(*info), GFP_KERNEL); 74 info = kzalloc(sizeof(*info), GFP_KERNEL);
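
The sram_probe() change fixes an inverted short-circuit: with "&&" a NULL pdata made the left operand true and the right operand then dereferenced the NULL pointer, while a valid pdata short-circuited to false so a missing pool_name was never rejected. Spelled out:

        /*
         *      pdata == NULL:
         *              old: !pdata && !pdata->pool_name  -> NULL dereference
         *              new: !pdata || ...                -> return -ENODEV
         *      pdata != NULL, pool_name == NULL:
         *              old: false && ...                 -> check passes silently
         *              new: ... || !pdata->pool_name     -> return -ENODEV
         */
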
diff --git a/arch/arm/mach-mv78xx0/addr-map.c b/arch/arm/mach-mv78xx0/addr-map.c
index 62b53d710efd..a9bc84180d21 100644
--- a/arch/arm/mach-mv78xx0/addr-map.c
+++ b/arch/arm/mach-mv78xx0/addr-map.c
@@ -37,7 +37,7 @@
37#define WIN0_OFF(n) (BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4)) 37#define WIN0_OFF(n) (BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4))
38#define WIN8_OFF(n) (BRIDGE_VIRT_BASE + 0x0900 + (((n) - 8) << 4)) 38#define WIN8_OFF(n) (BRIDGE_VIRT_BASE + 0x0900 + (((n) - 8) << 4))
39 39
40static void __init __iomem *win_cfg_base(int win) 40static void __init __iomem *win_cfg_base(const struct orion_addr_map_cfg *cfg, int win)
41{ 41{
42 /* 42 /*
43 * Find the control register base address for this window. 43 * Find the control register base address for this window.
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index b4c53b846c9c..3057f7d4329a 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -213,7 +213,8 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
213{ 213{
214 orion_ge00_init(eth_data, 214 orion_ge00_init(eth_data,
215 GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM, 215 GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
216 IRQ_MV78XX0_GE_ERR); 216 IRQ_MV78XX0_GE_ERR,
217 MV643XX_TX_CSUM_DEFAULT_LIMIT);
217} 218}
218 219
219 220
@@ -224,7 +225,8 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
224{ 225{
225 orion_ge01_init(eth_data, 226 orion_ge01_init(eth_data,
226 GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM, 227 GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
227 NO_IRQ); 228 NO_IRQ,
229 MV643XX_TX_CSUM_DEFAULT_LIMIT);
228} 230}
229 231
230 232
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index dd2db025f778..fcd4e85c4ddc 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -62,13 +62,14 @@ config ARCH_OMAP4
62 select PM_OPP if PM 62 select PM_OPP if PM
63 select USB_ARCH_HAS_EHCI if USB_SUPPORT 63 select USB_ARCH_HAS_EHCI if USB_SUPPORT
64 select ARM_CPU_SUSPEND if PM 64 select ARM_CPU_SUSPEND if PM
65 select ARCH_NEEDS_CPU_IDLE_COUPLED 65 select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
66 66
67config SOC_OMAP5 67config SOC_OMAP5
68 bool "TI OMAP5" 68 bool "TI OMAP5"
69 select CPU_V7 69 select CPU_V7
70 select ARM_GIC 70 select ARM_GIC
71 select HAVE_SMP 71 select HAVE_SMP
72 select ARM_CPU_SUSPEND if PM
72 73
73comment "OMAP Core Type" 74comment "OMAP Core Type"
74 depends on ARCH_OMAP2 75 depends on ARCH_OMAP2
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 74915295482e..28214483aaba 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -554,6 +554,8 @@ static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = {
554 554
555#ifdef CONFIG_OMAP_MUX 555#ifdef CONFIG_OMAP_MUX
556static struct omap_board_mux board_mux[] __initdata = { 556static struct omap_board_mux board_mux[] __initdata = {
557 /* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */
558 OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
557 { .reg_offset = OMAP_MUX_TERMINATOR }, 559 { .reg_offset = OMAP_MUX_TERMINATOR },
558}; 560};
559#endif 561#endif
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index ef230a0eb5eb..0d362e9f9cb9 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -58,6 +58,7 @@
58#include "hsmmc.h" 58#include "hsmmc.h"
59#include "common-board-devices.h" 59#include "common-board-devices.h"
60 60
61#define OMAP3_EVM_TS_GPIO 175
61#define OMAP3_EVM_EHCI_VBUS 22 62#define OMAP3_EVM_EHCI_VBUS 22
62#define OMAP3_EVM_EHCI_SELECT 61 63#define OMAP3_EVM_EHCI_SELECT 61
63 64
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index 14734746457c..c1875862679f 100644
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -35,16 +35,6 @@ static struct omap2_mcspi_device_config ads7846_mcspi_config = {
35 .turbo_mode = 0, 35 .turbo_mode = 0,
36}; 36};
37 37
38/*
39 * ADS7846 driver maybe request a gpio according to the value
40 * of pdata->get_pendown_state, but we have done this. So set
41 * get_pendown_state to avoid twice gpio requesting.
42 */
43static int omap3_get_pendown_state(void)
44{
45 return !gpio_get_value(OMAP3_EVM_TS_GPIO);
46}
47
48static struct ads7846_platform_data ads7846_config = { 38static struct ads7846_platform_data ads7846_config = {
49 .x_max = 0x0fff, 39 .x_max = 0x0fff,
50 .y_max = 0x0fff, 40 .y_max = 0x0fff,
@@ -55,7 +45,6 @@ static struct ads7846_platform_data ads7846_config = {
55 .debounce_rep = 1, 45 .debounce_rep = 1,
56 .gpio_pendown = -EINVAL, 46 .gpio_pendown = -EINVAL,
57 .keep_vref_on = 1, 47 .keep_vref_on = 1,
58 .get_pendown_state = &omap3_get_pendown_state,
59}; 48};
60 49
61static struct spi_board_info ads7846_spi_board_info __initdata = { 50static struct spi_board_info ads7846_spi_board_info __initdata = {
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
index 4c4ef6a6166b..a0b4a42836ab 100644
--- a/arch/arm/mach-omap2/common-board-devices.h
+++ b/arch/arm/mach-omap2/common-board-devices.h
@@ -4,7 +4,6 @@
4#include "twl-common.h" 4#include "twl-common.h"
5 5
6#define NAND_BLOCK_SIZE SZ_128K 6#define NAND_BLOCK_SIZE SZ_128K
7#define OMAP3_EVM_TS_GPIO 175
8 7
9struct mtd_partition; 8struct mtd_partition;
10struct ads7846_platform_data; 9struct ads7846_platform_data;
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index ee05e193fc61..288bee6cbb76 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -238,8 +238,9 @@ int __init omap4_idle_init(void)
238 for_each_cpu(cpu_id, cpu_online_mask) { 238 for_each_cpu(cpu_id, cpu_online_mask) {
239 dev = &per_cpu(omap4_idle_dev, cpu_id); 239 dev = &per_cpu(omap4_idle_dev, cpu_id);
240 dev->cpu = cpu_id; 240 dev->cpu = cpu_id;
241#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
241 dev->coupled_cpus = *cpu_online_mask; 242 dev->coupled_cpus = *cpu_online_mask;
242 243#endif
243 cpuidle_register_driver(&omap4_idle_driver); 244 cpuidle_register_driver(&omap4_idle_driver);
244 245
245 if (cpuidle_register_device(dev)) { 246 if (cpuidle_register_device(dev)) {
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 471e62a74a16..76f9b3c2f586 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -127,7 +127,6 @@ struct omap_mux_partition {
127 * @gpio: GPIO number 127 * @gpio: GPIO number
128 * @muxnames: available signal modes for a ball 128 * @muxnames: available signal modes for a ball
129 * @balls: available balls on the package 129 * @balls: available balls on the package
130 * @partition: mux partition
131 */ 130 */
132struct omap_mux { 131struct omap_mux {
133 u16 reg_offset; 132 u16 reg_offset;
diff --git a/arch/arm/mach-omap2/opp4xxx_data.c b/arch/arm/mach-omap2/opp4xxx_data.c
index 2293ba27101b..c95415da23c2 100644
--- a/arch/arm/mach-omap2/opp4xxx_data.c
+++ b/arch/arm/mach-omap2/opp4xxx_data.c
@@ -94,7 +94,7 @@ int __init omap4_opp_init(void)
94{ 94{
95 int r = -ENODEV; 95 int r = -ENODEV;
96 96
97 if (!cpu_is_omap44xx()) 97 if (!cpu_is_omap443x())
98 return r; 98 return r;
99 99
100 r = omap_init_opp_table(omap44xx_opp_def_list, 100 r = omap_init_opp_table(omap44xx_opp_def_list,
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index e4fc88c65dbd..05bd8f02723f 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -272,21 +272,16 @@ void omap_sram_idle(void)
272 per_next_state = pwrdm_read_next_pwrst(per_pwrdm); 272 per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
273 core_next_state = pwrdm_read_next_pwrst(core_pwrdm); 273 core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
274 274
275 if (mpu_next_state < PWRDM_POWER_ON) { 275 pwrdm_pre_transition(NULL);
276 pwrdm_pre_transition(mpu_pwrdm);
277 pwrdm_pre_transition(neon_pwrdm);
278 }
279 276
280 /* PER */ 277 /* PER */
281 if (per_next_state < PWRDM_POWER_ON) { 278 if (per_next_state < PWRDM_POWER_ON) {
282 pwrdm_pre_transition(per_pwrdm);
283 per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0; 279 per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
284 omap2_gpio_prepare_for_idle(per_going_off); 280 omap2_gpio_prepare_for_idle(per_going_off);
285 } 281 }
286 282
287 /* CORE */ 283 /* CORE */
288 if (core_next_state < PWRDM_POWER_ON) { 284 if (core_next_state < PWRDM_POWER_ON) {
289 pwrdm_pre_transition(core_pwrdm);
290 if (core_next_state == PWRDM_POWER_OFF) { 285 if (core_next_state == PWRDM_POWER_OFF) {
291 omap3_core_save_context(); 286 omap3_core_save_context();
292 omap3_cm_save_context(); 287 omap3_cm_save_context();
@@ -339,20 +334,14 @@ void omap_sram_idle(void)
339 omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK, 334 omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
340 OMAP3430_GR_MOD, 335 OMAP3430_GR_MOD,
341 OMAP3_PRM_VOLTCTRL_OFFSET); 336 OMAP3_PRM_VOLTCTRL_OFFSET);
342 pwrdm_post_transition(core_pwrdm);
343 } 337 }
344 omap3_intc_resume_idle(); 338 omap3_intc_resume_idle();
345 339
340 pwrdm_post_transition(NULL);
341
346 /* PER */ 342 /* PER */
347 if (per_next_state < PWRDM_POWER_ON) { 343 if (per_next_state < PWRDM_POWER_ON)
348 omap2_gpio_resume_after_idle(); 344 omap2_gpio_resume_after_idle();
349 pwrdm_post_transition(per_pwrdm);
350 }
351
352 if (mpu_next_state < PWRDM_POWER_ON) {
353 pwrdm_post_transition(mpu_pwrdm);
354 pwrdm_post_transition(neon_pwrdm);
355 }
356} 345}
357 346
358static void omap3_pm_idle(void) 347static void omap3_pm_idle(void)
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index 9f6b83d1b193..91e71d8f46f0 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -56,9 +56,13 @@ ppa_por_params:
56 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET. 56 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
57 * It returns to the caller for CPU INACTIVE and ON power states or in case 57 * It returns to the caller for CPU INACTIVE and ON power states or in case
58 * CPU failed to transition to targeted OFF/DORMANT state. 58 * CPU failed to transition to targeted OFF/DORMANT state.
59 *
60 * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
61 * stack frame and it expects the caller to take care of it. Hence the entire
62 * stack frame is saved to avoid possible stack corruption.
59 */ 63 */
60ENTRY(omap4_finish_suspend) 64ENTRY(omap4_finish_suspend)
61 stmfd sp!, {lr} 65 stmfd sp!, {r4-r12, lr}
62 cmp r0, #0x0 66 cmp r0, #0x0
63 beq do_WFI @ No lowpower state, jump to WFI 67 beq do_WFI @ No lowpower state, jump to WFI
64 68
@@ -226,7 +230,7 @@ scu_gp_clear:
226skip_scu_gp_clear: 230skip_scu_gp_clear:
227 isb 231 isb
228 dsb 232 dsb
229 ldmfd sp!, {pc} 233 ldmfd sp!, {r4-r12, pc}
230ENDPROC(omap4_finish_suspend) 234ENDPROC(omap4_finish_suspend)
231 235
232/* 236/*
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index de47f170ba50..db5ff6642375 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -67,6 +67,7 @@ void __init omap_pmic_init(int bus, u32 clkrate,
67 const char *pmic_type, int pmic_irq, 67 const char *pmic_type, int pmic_irq,
68 struct twl4030_platform_data *pmic_data) 68 struct twl4030_platform_data *pmic_data)
69{ 69{
70 omap_mux_init_signal("sys_nirq", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
70 strncpy(pmic_i2c_board_info.type, pmic_type, 71 strncpy(pmic_i2c_board_info.type, pmic_type,
71 sizeof(pmic_i2c_board_info.type)); 72 sizeof(pmic_i2c_board_info.type));
72 pmic_i2c_board_info.irq = pmic_irq; 73 pmic_i2c_board_info.irq = pmic_irq;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 9148b229d0de..410291c67666 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -109,7 +109,8 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
109{ 109{
110 orion_ge00_init(eth_data, 110 orion_ge00_init(eth_data,
111 ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM, 111 ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
112 IRQ_ORION5X_ETH_ERR); 112 IRQ_ORION5X_ETH_ERR,
113 MV643XX_TX_CSUM_DEFAULT_LIMIT);
113} 114}
114 115
115 116
diff --git a/arch/arm/mach-s3c24xx/include/mach/dma.h b/arch/arm/mach-s3c24xx/include/mach/dma.h
index 454831b66037..ee99fd56c043 100644
--- a/arch/arm/mach-s3c24xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c24xx/include/mach/dma.h
@@ -24,7 +24,8 @@
24*/ 24*/
25 25
26enum dma_ch { 26enum dma_ch {
27 DMACH_XD0, 27 DMACH_DT_PROP = -1, /* not yet supported, do not use */
28 DMACH_XD0 = 0,
28 DMACH_XD1, 29 DMACH_XD1,
29 DMACH_SDI, 30 DMACH_SDI,
30 DMACH_SPI0, 31 DMACH_SPI0,
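
The dma.h hunk above reserves a device-tree placeholder as a negative enumerator while pinning the first real channel to 0, so existing users of the enum keep their values. A tiny illustration of that pattern, with made-up names:

    /* Hypothetical enum showing the same idea: a negative sentinel is added
     * without shifting the existing entries, which stay usable as array
     * indices starting at 0. */
    enum example_ch {
            EXCH_INVALID = -1,      /* placeholder, never used as an index */
            EXCH_FIRST   = 0,       /* explicit so the sentinel cannot move it */
            EXCH_SECOND,
    };
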
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index c013bbf79cac..53d3d46dec12 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -41,7 +41,6 @@ config MACH_HREFV60
41config MACH_SNOWBALL 41config MACH_SNOWBALL
42 bool "U8500 Snowball platform" 42 bool "U8500 Snowball platform"
43 select MACH_MOP500 43 select MACH_MOP500
44 select LEDS_GPIO
45 help 44 help
46 Include support for the snowball development platform. 45 Include support for the snowball development platform.
47 46
diff --git a/arch/arm/mach-ux500/board-mop500-msp.c b/arch/arm/mach-ux500/board-mop500-msp.c
index 996048038743..df15646036aa 100644
--- a/arch/arm/mach-ux500/board-mop500-msp.c
+++ b/arch/arm/mach-ux500/board-mop500-msp.c
@@ -191,9 +191,9 @@ static struct platform_device *db8500_add_msp_i2s(struct device *parent,
191 return pdev; 191 return pdev;
192} 192}
193 193
194/* Platform device for ASoC U8500 machine */ 194/* Platform device for ASoC MOP500 machine */
195static struct platform_device snd_soc_u8500 = { 195static struct platform_device snd_soc_mop500 = {
196 .name = "snd-soc-u8500", 196 .name = "snd-soc-mop500",
197 .id = 0, 197 .id = 0,
198 .dev = { 198 .dev = {
199 .platform_data = NULL, 199 .platform_data = NULL,
@@ -227,8 +227,8 @@ int mop500_msp_init(struct device *parent)
227{ 227{
228 struct platform_device *msp1; 228 struct platform_device *msp1;
229 229
230 pr_info("%s: Register platform-device 'snd-soc-u8500'.\n", __func__); 230 pr_info("%s: Register platform-device 'snd-soc-mop500'.\n", __func__);
231 platform_device_register(&snd_soc_u8500); 231 platform_device_register(&snd_soc_mop500);
232 232
233 pr_info("Initialize MSP I2S-devices.\n"); 233 pr_info("Initialize MSP I2S-devices.\n");
234 db8500_add_msp_i2s(parent, 0, U8500_MSP0_BASE, IRQ_DB8500_MSP0, 234 db8500_add_msp_i2s(parent, 0, U8500_MSP0_BASE, IRQ_DB8500_MSP0,
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 8674a890fd1c..a534d8880de1 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -797,6 +797,7 @@ static void __init u8500_init_machine(void)
797 ARRAY_SIZE(mop500_platform_devs)); 797 ARRAY_SIZE(mop500_platform_devs));
798 798
799 mop500_sdi_init(parent); 799 mop500_sdi_init(parent);
800 mop500_msp_init(parent);
800 i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); 801 i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
801 i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); 802 i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
802 i2c_register_board_info(2, mop500_i2c2_devices, 803 i2c_register_board_info(2, mop500_i2c2_devices,
@@ -804,6 +805,8 @@ static void __init u8500_init_machine(void)
804 805
805 mop500_uib_init(); 806 mop500_uib_init();
806 807
808 } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
809 mop500_msp_init(parent);
807 } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) { 810 } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) {
808 /* 811 /*
809 * The HREFv60 board removed a GPIO expander and routed 812 * The HREFv60 board removed a GPIO expander and routed
@@ -815,6 +818,7 @@ static void __init u8500_init_machine(void)
815 ARRAY_SIZE(mop500_platform_devs)); 818 ARRAY_SIZE(mop500_platform_devs));
816 819
817 hrefv60_sdi_init(parent); 820 hrefv60_sdi_init(parent);
821 mop500_msp_init(parent);
818 822
819 i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); 823 i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
820 i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES; 824 i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES;
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 626ad8cad7a9..938b50a33439 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -189,6 +189,7 @@ struct omap_dm_timer *omap_dm_timer_request(void)
189 timer->reserved = 1; 189 timer->reserved = 1;
190 break; 190 break;
191 } 191 }
192 spin_unlock_irqrestore(&dm_timer_lock, flags);
192 193
193 if (timer) { 194 if (timer) {
194 ret = omap_dm_timer_prepare(timer); 195 ret = omap_dm_timer_prepare(timer);
@@ -197,7 +198,6 @@ struct omap_dm_timer *omap_dm_timer_request(void)
197 timer = NULL; 198 timer = NULL;
198 } 199 }
199 } 200 }
200 spin_unlock_irqrestore(&dm_timer_lock, flags);
201 201
202 if (!timer) 202 if (!timer)
203 pr_debug("%s: timer request failed!\n", __func__); 203 pr_debug("%s: timer request failed!\n", __func__);
@@ -220,6 +220,7 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
220 break; 220 break;
221 } 221 }
222 } 222 }
223 spin_unlock_irqrestore(&dm_timer_lock, flags);
223 224
224 if (timer) { 225 if (timer) {
225 ret = omap_dm_timer_prepare(timer); 226 ret = omap_dm_timer_prepare(timer);
@@ -228,7 +229,6 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
228 timer = NULL; 229 timer = NULL;
229 } 230 }
230 } 231 }
231 spin_unlock_irqrestore(&dm_timer_lock, flags);
232 232
233 if (!timer) 233 if (!timer)
234 pr_debug("%s: timer%d request failed!\n", __func__, id); 234 pr_debug("%s: timer%d request failed!\n", __func__, id);
@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_enable);
258 258
259void omap_dm_timer_disable(struct omap_dm_timer *timer) 259void omap_dm_timer_disable(struct omap_dm_timer *timer)
260{ 260{
261 pm_runtime_put(&timer->pdev->dev); 261 pm_runtime_put_sync(&timer->pdev->dev);
262} 262}
263EXPORT_SYMBOL_GPL(omap_dm_timer_disable); 263EXPORT_SYMBOL_GPL(omap_dm_timer_disable);
264 264
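
Both omap_dm_timer_request() hunks above move spin_unlock_irqrestore() up so that omap_dm_timer_prepare(), which may sleep, now runs outside the spinlock; the lock only covers reserving the timer. A condensed sketch of the resulting shape (hypothetical wrapper name, error handling trimmed):

    /* Sketch of the locking shape after the change above. */
    static struct omap_dm_timer *request_sketch(void)
    {
            struct omap_dm_timer *timer = NULL, *t;
            unsigned long flags;

            spin_lock_irqsave(&dm_timer_lock, flags);
            list_for_each_entry(t, &omap_timer_list, node) {
                    if (!t->reserved) {
                            timer = t;
                            timer->reserved = 1;
                            break;
                    }
            }
            spin_unlock_irqrestore(&dm_timer_lock, flags);  /* drop the lock first */

            /* omap_dm_timer_prepare() may sleep, so it is called unlocked now */
            if (timer && omap_dm_timer_prepare(timer)) {
                    timer->reserved = 0;
                    timer = NULL;
            }
            return timer;
    }
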
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 68b180edcfff..bb5d08a70dbc 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -372,7 +372,8 @@ IS_OMAP_TYPE(3430, 0x3430)
372#define cpu_class_is_omap1() (cpu_is_omap7xx() || cpu_is_omap15xx() || \ 372#define cpu_class_is_omap1() (cpu_is_omap7xx() || cpu_is_omap15xx() || \
373 cpu_is_omap16xx()) 373 cpu_is_omap16xx())
374#define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \ 374#define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \
375 cpu_is_omap44xx() || soc_is_omap54xx()) 375 cpu_is_omap44xx() || soc_is_omap54xx() || \
376 soc_is_am33xx())
376 377
377/* Various silicon revisions for omap2 */ 378/* Various silicon revisions for omap2 */
378#define OMAP242X_CLASS 0x24200024 379#define OMAP242X_CLASS 0x24200024
diff --git a/arch/arm/plat-omap/include/plat/multi.h b/arch/arm/plat-omap/include/plat/multi.h
index 045e320f1067..324d31b14852 100644
--- a/arch/arm/plat-omap/include/plat/multi.h
+++ b/arch/arm/plat-omap/include/plat/multi.h
@@ -108,4 +108,13 @@
108# endif 108# endif
109#endif 109#endif
110 110
111#ifdef CONFIG_SOC_AM33XX
112# ifdef OMAP_NAME
113# undef MULTI_OMAP2
114# define MULTI_OMAP2
115# else
116# define OMAP_NAME am33xx
117# endif
118#endif
119
111#endif /* __PLAT_OMAP_MULTI_H */ 120#endif /* __PLAT_OMAP_MULTI_H */
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index b8d19a136781..7f7b112acccb 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -110,7 +110,7 @@ static inline void flush(void)
110 _DEBUG_LL_ENTRY(mach, AM33XX_UART##p##_BASE, OMAP_PORT_SHIFT, \ 110 _DEBUG_LL_ENTRY(mach, AM33XX_UART##p##_BASE, OMAP_PORT_SHIFT, \
111 AM33XXUART##p) 111 AM33XXUART##p)
112 112
113static inline void __arch_decomp_setup(unsigned long arch_id) 113static inline void arch_decomp_setup(void)
114{ 114{
115 int port = 0; 115 int port = 0;
116 116
@@ -198,8 +198,6 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
198 } while (0); 198 } while (0);
199} 199}
200 200
201#define arch_decomp_setup() __arch_decomp_setup(arch_id)
202
203/* 201/*
204 * nothing to do 202 * nothing to do
205 */ 203 */
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index d245a87dc014..b8b747a9d360 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -291,10 +291,12 @@ static struct platform_device orion_ge00 = {
291void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data, 291void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
292 unsigned long mapbase, 292 unsigned long mapbase,
293 unsigned long irq, 293 unsigned long irq,
294 unsigned long irq_err) 294 unsigned long irq_err,
295 unsigned int tx_csum_limit)
295{ 296{
296 fill_resources(&orion_ge00_shared, orion_ge00_shared_resources, 297 fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
297 mapbase + 0x2000, SZ_16K - 1, irq_err); 298 mapbase + 0x2000, SZ_16K - 1, irq_err);
299 orion_ge00_shared_data.tx_csum_limit = tx_csum_limit;
298 ge_complete(&orion_ge00_shared_data, 300 ge_complete(&orion_ge00_shared_data,
299 orion_ge00_resources, irq, &orion_ge00_shared, 301 orion_ge00_resources, irq, &orion_ge00_shared,
300 eth_data, &orion_ge00); 302 eth_data, &orion_ge00);
@@ -343,10 +345,12 @@ static struct platform_device orion_ge01 = {
343void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data, 345void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
344 unsigned long mapbase, 346 unsigned long mapbase,
345 unsigned long irq, 347 unsigned long irq,
346 unsigned long irq_err) 348 unsigned long irq_err,
349 unsigned int tx_csum_limit)
347{ 350{
348 fill_resources(&orion_ge01_shared, orion_ge01_shared_resources, 351 fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
349 mapbase + 0x2000, SZ_16K - 1, irq_err); 352 mapbase + 0x2000, SZ_16K - 1, irq_err);
353 orion_ge01_shared_data.tx_csum_limit = tx_csum_limit;
350 ge_complete(&orion_ge01_shared_data, 354 ge_complete(&orion_ge01_shared_data,
351 orion_ge01_resources, irq, &orion_ge01_shared, 355 orion_ge01_resources, irq, &orion_ge01_shared,
352 eth_data, &orion_ge01); 356 eth_data, &orion_ge01);
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index e00fdb213609..ae2377ef63e5 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -39,12 +39,14 @@ void __init orion_rtc_init(unsigned long mapbase,
39void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data, 39void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
40 unsigned long mapbase, 40 unsigned long mapbase,
41 unsigned long irq, 41 unsigned long irq,
42 unsigned long irq_err); 42 unsigned long irq_err,
43 unsigned int tx_csum_limit);
43 44
44void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data, 45void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
45 unsigned long mapbase, 46 unsigned long mapbase,
46 unsigned long irq, 47 unsigned long irq,
47 unsigned long irq_err); 48 unsigned long irq_err,
49 unsigned int tx_csum_limit);
48 50
49void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data, 51void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
50 unsigned long mapbase, 52 unsigned long mapbase,
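
With the two prototypes above, every caller now passes a TX checksum limit alongside the IRQ numbers: Kirkwood passes 1600 explicitly, while mv78xx0 and Orion5x use MV643XX_TX_CSUM_DEFAULT_LIMIT. A hedged sketch of a post-change caller (the wrapper name is hypothetical; the base address, IRQ names and 1600-byte limit mirror the Kirkwood hunk earlier in this diff):

    /* Illustrative caller only; identifiers follow the Kirkwood hunk above. */
    void __init board_ge00_init(struct mv643xx_eth_platform_data *eth_data)
    {
            orion_ge00_init(eth_data,
                            GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
                            IRQ_KIRKWOOD_GE00_ERR,
                            1600 /* tx_csum_limit: larger frames fall back to
                                    software checksumming in mv643xx_eth */);
    }
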
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 28f898f75380..db98e7021f0d 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -430,7 +430,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
430 * when necessary. 430 * when necessary.
431*/ 431*/
432 432
433int s3c2410_dma_enqueue(unsigned int channel, void *id, 433int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
434 dma_addr_t data, int size) 434 dma_addr_t data, int size)
435{ 435{
436 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 436 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 74e31ce35538..fc49f3dabd76 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -32,6 +32,8 @@
32#include <linux/platform_data/s3c-hsudc.h> 32#include <linux/platform_data/s3c-hsudc.h>
33#include <linux/platform_data/s3c-hsotg.h> 33#include <linux/platform_data/s3c-hsotg.h>
34 34
35#include <media/s5p_hdmi.h>
36
35#include <asm/irq.h> 37#include <asm/irq.h>
36#include <asm/pmu.h> 38#include <asm/pmu.h>
37#include <asm/mach/arch.h> 39#include <asm/mach/arch.h>
@@ -748,7 +750,8 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)
748 if (!pd) { 750 if (!pd) {
749 pd = &default_i2c_data; 751 pd = &default_i2c_data;
750 752
751 if (soc_is_exynos4210()) 753 if (soc_is_exynos4210() ||
754 soc_is_exynos4212() || soc_is_exynos4412())
752 pd->bus_num = 8; 755 pd->bus_num = 8;
753 else if (soc_is_s5pv210()) 756 else if (soc_is_s5pv210())
754 pd->bus_num = 3; 757 pd->bus_num = 3;
@@ -759,6 +762,30 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)
759 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), 762 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
760 &s5p_device_i2c_hdmiphy); 763 &s5p_device_i2c_hdmiphy);
761} 764}
765
766struct s5p_hdmi_platform_data s5p_hdmi_def_platdata;
767
768void __init s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info,
769 struct i2c_board_info *mhl_info, int mhl_bus)
770{
771 struct s5p_hdmi_platform_data *pd = &s5p_hdmi_def_platdata;
772
773 if (soc_is_exynos4210() ||
774 soc_is_exynos4212() || soc_is_exynos4412())
775 pd->hdmiphy_bus = 8;
776 else if (soc_is_s5pv210())
777 pd->hdmiphy_bus = 3;
778 else
779 pd->hdmiphy_bus = 0;
780
781 pd->hdmiphy_info = hdmiphy_info;
782 pd->mhl_info = mhl_info;
783 pd->mhl_bus = mhl_bus;
784
785 s3c_set_platdata(pd, sizeof(struct s5p_hdmi_platform_data),
786 &s5p_device_hdmi);
787}
788
762#endif /* CONFIG_S5P_DEV_I2C_HDMIPHY */ 789#endif /* CONFIG_S5P_DEV_I2C_HDMIPHY */
763 790
764/* I2S */ 791/* I2S */
diff --git a/arch/arm/plat-samsung/include/plat/hdmi.h b/arch/arm/plat-samsung/include/plat/hdmi.h
new file mode 100644
index 000000000000..331d046ac2c5
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/hdmi.h
@@ -0,0 +1,16 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 */
9
10#ifndef __PLAT_SAMSUNG_HDMI_H
11#define __PLAT_SAMSUNG_HDMI_H __FILE__
12
13extern void s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info,
14 struct i2c_board_info *mhl_info, int mhl_bus);
15
16#endif /* __PLAT_SAMSUNG_HDMI_H */
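
The new header only declares s5p_hdmi_set_platdata(); a board file is expected to hand it the HDMIPHY (and optionally MHL) I2C details before s5p_device_hdmi is registered. A minimal, hypothetical board-side usage sketch (the chip name and address are illustrative, not taken from this diff):

    #include <linux/i2c.h>
    #include <plat/hdmi.h>

    /* Hypothetical HDMIPHY chip; the bus number is chosen internally by
     * s5p_hdmi_set_platdata() based on the SoC, as shown in devs.c above. */
    static struct i2c_board_info board_hdmiphy_info = {
            I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
    };

    static void __init board_hdmi_init(void)
    {
            /* No MHL bridge on this imaginary board: NULL info, bus unused. */
            s5p_hdmi_set_platdata(&board_hdmiphy_info, NULL, 0);
    }
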
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 64ab65f0fdbc..15070284343e 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -74,7 +74,7 @@ unsigned char pm_uart_udivslot;
74 74
75#ifdef CONFIG_SAMSUNG_PM_DEBUG 75#ifdef CONFIG_SAMSUNG_PM_DEBUG
76 76
77struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS]; 77static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
78 78
79static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save) 79static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
80{ 80{
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 954d81e2e837..7913695b2fcb 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -234,5 +234,4 @@ CONFIG_CRYPTO_PCBC=m
234CONFIG_CRYPTO_MD5=y 234CONFIG_CRYPTO_MD5=y
235# CONFIG_CRYPTO_ANSI_CPRNG is not set 235# CONFIG_CRYPTO_ANSI_CPRNG is not set
236CONFIG_CRC_T10DIF=y 236CONFIG_CRC_T10DIF=y
237CONFIG_MISC_DEVICES=y
238CONFIG_INTEL_IOMMU=y 237CONFIG_INTEL_IOMMU=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 91c41ecfa6d9..f8e913365423 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -209,4 +209,3 @@ CONFIG_MAGIC_SYSRQ=y
209CONFIG_DEBUG_KERNEL=y 209CONFIG_DEBUG_KERNEL=y
210CONFIG_DEBUG_MUTEXES=y 210CONFIG_DEBUG_MUTEXES=y
211CONFIG_CRYPTO_MD5=y 211CONFIG_CRYPTO_MD5=y
212CONFIG_MISC_DEVICES=y
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 4a469907f04a..b22df9410dce 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -5,6 +5,7 @@ config M68K
5 select HAVE_AOUT if MMU 5 select HAVE_AOUT if MMU
6 select HAVE_GENERIC_HARDIRQS 6 select HAVE_GENERIC_HARDIRQS
7 select GENERIC_IRQ_SHOW 7 select GENERIC_IRQ_SHOW
8 select GENERIC_ATOMIC64
8 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS 9 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
9 select GENERIC_CPU_DEVICES 10 select GENERIC_CPU_DEVICES
10 select GENERIC_STRNCPY_FROM_USER if MMU 11 select GENERIC_STRNCPY_FROM_USER if MMU
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 82068349a2bb..c4eb79edecec 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -28,6 +28,7 @@ config COLDFIRE
28 select CPU_HAS_NO_BITFIELDS 28 select CPU_HAS_NO_BITFIELDS
29 select CPU_HAS_NO_MULDIV64 29 select CPU_HAS_NO_MULDIV64
30 select GENERIC_CSUM 30 select GENERIC_CSUM
31 select HAVE_CLK
31 32
32endchoice 33endchoice
33 34
@@ -58,7 +59,6 @@ config MCPU32
58config M68020 59config M68020
59 bool "68020 support" 60 bool "68020 support"
60 depends on MMU 61 depends on MMU
61 select GENERIC_ATOMIC64
62 select CPU_HAS_ADDRESS_SPACES 62 select CPU_HAS_ADDRESS_SPACES
63 help 63 help
64 If you anticipate running this kernel on a computer with a MC68020 64 If you anticipate running this kernel on a computer with a MC68020
@@ -69,7 +69,6 @@ config M68020
69config M68030 69config M68030
70 bool "68030 support" 70 bool "68030 support"
71 depends on MMU && !MMU_SUN3 71 depends on MMU && !MMU_SUN3
72 select GENERIC_ATOMIC64
73 select CPU_HAS_ADDRESS_SPACES 72 select CPU_HAS_ADDRESS_SPACES
74 help 73 help
75 If you anticipate running this kernel on a computer with a MC68030 74 If you anticipate running this kernel on a computer with a MC68030
@@ -79,7 +78,6 @@ config M68030
79config M68040 78config M68040
80 bool "68040 support" 79 bool "68040 support"
81 depends on MMU && !MMU_SUN3 80 depends on MMU && !MMU_SUN3
82 select GENERIC_ATOMIC64
83 select CPU_HAS_ADDRESS_SPACES 81 select CPU_HAS_ADDRESS_SPACES
84 help 82 help
85 If you anticipate running this kernel on a computer with a MC68LC040 83 If you anticipate running this kernel on a computer with a MC68LC040
@@ -90,7 +88,6 @@ config M68040
90config M68060 88config M68060
91 bool "68060 support" 89 bool "68060 support"
92 depends on MMU && !MMU_SUN3 90 depends on MMU && !MMU_SUN3
93 select GENERIC_ATOMIC64
94 select CPU_HAS_ADDRESS_SPACES 91 select CPU_HAS_ADDRESS_SPACES
95 help 92 help
96 If you anticipate running this kernel on a computer with a MC68060 93 If you anticipate running this kernel on a computer with a MC68060
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 331d574df99c..faf65286574e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -89,6 +89,7 @@ config ATH79
89 select CEVT_R4K 89 select CEVT_R4K
90 select CSRC_R4K 90 select CSRC_R4K
91 select DMA_NONCOHERENT 91 select DMA_NONCOHERENT
92 select HAVE_CLK
92 select IRQ_CPU 93 select IRQ_CPU
93 select MIPS_MACHINE 94 select MIPS_MACHINE
94 select SYS_HAS_CPU_MIPS32_R2 95 select SYS_HAS_CPU_MIPS32_R2
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 99969484c475..a124c251c0c9 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -228,6 +228,8 @@ static int mtx1_pci_idsel(unsigned int devsel, int assert)
228 * adapter on the mtx-1 "singleboard" variant. It triggers a custom 228 * adapter on the mtx-1 "singleboard" variant. It triggers a custom
229 * logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals. 229 * logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals.
230 */ 230 */
231 udelay(1);
232
231 if (assert && devsel != 0) 233 if (assert && devsel != 0)
232 /* Suppress signal to Cardbus */ 234 /* Suppress signal to Cardbus */
233 alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */ 235 alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */
diff --git a/arch/mips/ath79/dev-usb.c b/arch/mips/ath79/dev-usb.c
index 36e9570e7bc4..b2a2311ec85b 100644
--- a/arch/mips/ath79/dev-usb.c
+++ b/arch/mips/ath79/dev-usb.c
@@ -145,6 +145,8 @@ static void __init ar7240_usb_setup(void)
145 145
146 ath79_ohci_resources[0].start = AR7240_OHCI_BASE; 146 ath79_ohci_resources[0].start = AR7240_OHCI_BASE;
147 ath79_ohci_resources[0].end = AR7240_OHCI_BASE + AR7240_OHCI_SIZE - 1; 147 ath79_ohci_resources[0].end = AR7240_OHCI_BASE + AR7240_OHCI_SIZE - 1;
148 ath79_ohci_resources[1].start = ATH79_CPU_IRQ_USB;
149 ath79_ohci_resources[1].end = ATH79_CPU_IRQ_USB;
148 platform_device_register(&ath79_ohci_device); 150 platform_device_register(&ath79_ohci_device);
149} 151}
150 152
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
index 29054f211832..48fe762d2526 100644
--- a/arch/mips/ath79/gpio.c
+++ b/arch/mips/ath79/gpio.c
@@ -188,8 +188,10 @@ void __init ath79_gpio_init(void)
188 188
189 if (soc_is_ar71xx()) 189 if (soc_is_ar71xx())
190 ath79_gpio_count = AR71XX_GPIO_COUNT; 190 ath79_gpio_count = AR71XX_GPIO_COUNT;
191 else if (soc_is_ar724x()) 191 else if (soc_is_ar7240())
192 ath79_gpio_count = AR724X_GPIO_COUNT; 192 ath79_gpio_count = AR7240_GPIO_COUNT;
193 else if (soc_is_ar7241() || soc_is_ar7242())
194 ath79_gpio_count = AR7241_GPIO_COUNT;
193 else if (soc_is_ar913x()) 195 else if (soc_is_ar913x())
194 ath79_gpio_count = AR913X_GPIO_COUNT; 196 ath79_gpio_count = AR913X_GPIO_COUNT;
195 else if (soc_is_ar933x()) 197 else if (soc_is_ar933x())
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
index e39f73048d4f..f1c9c3e2f678 100644
--- a/arch/mips/bcm63xx/dev-spi.c
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -106,11 +106,15 @@ int __init bcm63xx_spi_register(void)
106 if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) { 106 if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
107 spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1; 107 spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
108 spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE; 108 spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
109 spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
110 spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
109 } 111 }
110 112
111 if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) { 113 if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
112 spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1; 114 spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
113 spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE; 115 spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
116 spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
117 spi_pdata.msg_ctl_width = SPI_6358_MSG_CTL_WIDTH;
114 } 118 }
115 119
116 bcm63xx_spi_regs_init(); 120 bcm63xx_spi_regs_init();
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 7fb1f222b8a5..274cd4fad30c 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -61,6 +61,12 @@ static void octeon_irq_set_ciu_mapping(int irq, int line, int bit,
61 octeon_irq_ciu_to_irq[line][bit] = irq; 61 octeon_irq_ciu_to_irq[line][bit] = irq;
62} 62}
63 63
64static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
65 int irq, int line, int bit)
66{
67 irq_domain_associate(domain, irq, line << 6 | bit);
68}
69
64static int octeon_coreid_for_cpu(int cpu) 70static int octeon_coreid_for_cpu(int cpu)
65{ 71{
66#ifdef CONFIG_SMP 72#ifdef CONFIG_SMP
@@ -183,19 +189,9 @@ static void __init octeon_irq_init_core(void)
183 mutex_init(&cd->core_irq_mutex); 189 mutex_init(&cd->core_irq_mutex);
184 190
185 irq = OCTEON_IRQ_SW0 + i; 191 irq = OCTEON_IRQ_SW0 + i;
186 switch (irq) { 192 irq_set_chip_data(irq, cd);
187 case OCTEON_IRQ_TIMER: 193 irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
188 case OCTEON_IRQ_SW0: 194 handle_percpu_irq);
189 case OCTEON_IRQ_SW1:
190 case OCTEON_IRQ_5:
191 case OCTEON_IRQ_PERF:
192 irq_set_chip_data(irq, cd);
193 irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
194 handle_percpu_irq);
195 break;
196 default:
197 break;
198 }
199 } 195 }
200} 196}
201 197
@@ -890,7 +886,6 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
890 unsigned int type; 886 unsigned int type;
891 unsigned int pin; 887 unsigned int pin;
892 unsigned int trigger; 888 unsigned int trigger;
893 struct octeon_irq_gpio_domain_data *gpiod;
894 889
895 if (d->of_node != node) 890 if (d->of_node != node)
896 return -EINVAL; 891 return -EINVAL;
@@ -925,8 +920,7 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
925 break; 920 break;
926 } 921 }
927 *out_type = type; 922 *out_type = type;
928 gpiod = d->host_data; 923 *out_hwirq = pin;
929 *out_hwirq = gpiod->base_hwirq + pin;
930 924
931 return 0; 925 return 0;
932} 926}
@@ -996,19 +990,21 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
996static int octeon_irq_gpio_map(struct irq_domain *d, 990static int octeon_irq_gpio_map(struct irq_domain *d,
997 unsigned int virq, irq_hw_number_t hw) 991 unsigned int virq, irq_hw_number_t hw)
998{ 992{
999 unsigned int line = hw >> 6; 993 struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
1000 unsigned int bit = hw & 63; 994 unsigned int line, bit;
1001 995
1002 if (!octeon_irq_virq_in_range(virq)) 996 if (!octeon_irq_virq_in_range(virq))
1003 return -EINVAL; 997 return -EINVAL;
1004 998
999 hw += gpiod->base_hwirq;
1000 line = hw >> 6;
1001 bit = hw & 63;
1005 if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) 1002 if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
1006 return -EINVAL; 1003 return -EINVAL;
1007 1004
1008 octeon_irq_set_ciu_mapping(virq, line, bit, 1005 octeon_irq_set_ciu_mapping(virq, line, bit,
1009 octeon_irq_gpio_chip, 1006 octeon_irq_gpio_chip,
1010 octeon_irq_handle_gpio); 1007 octeon_irq_handle_gpio);
1011
1012 return 0; 1008 return 0;
1013} 1009}
1014 1010
@@ -1149,6 +1145,7 @@ static void __init octeon_irq_init_ciu(void)
1149 struct irq_chip *chip_wd; 1145 struct irq_chip *chip_wd;
1150 struct device_node *gpio_node; 1146 struct device_node *gpio_node;
1151 struct device_node *ciu_node; 1147 struct device_node *ciu_node;
1148 struct irq_domain *ciu_domain = NULL;
1152 1149
1153 octeon_irq_init_ciu_percpu(); 1150 octeon_irq_init_ciu_percpu();
1154 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; 1151 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
@@ -1177,31 +1174,6 @@ static void __init octeon_irq_init_ciu(void)
1177 /* Mips internal */ 1174 /* Mips internal */
1178 octeon_irq_init_core(); 1175 octeon_irq_init_core();
1179 1176
1180 /* CIU_0 */
1181 for (i = 0; i < 16; i++)
1182 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq);
1183
1184 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
1185 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);
1186
1187 for (i = 0; i < 4; i++)
1188 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq);
1189 for (i = 0; i < 4; i++)
1190 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq);
1191
1192 octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq);
1193 for (i = 0; i < 4; i++)
1194 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip, handle_edge_irq);
1195
1196 octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq);
1197 octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq);
1198
1199 /* CIU_1 */
1200 for (i = 0; i < 16; i++)
1201 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);
1202
1203 octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq);
1204
1205 gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); 1177 gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
1206 if (gpio_node) { 1178 if (gpio_node) {
1207 struct octeon_irq_gpio_domain_data *gpiod; 1179 struct octeon_irq_gpio_domain_data *gpiod;
@@ -1219,10 +1191,35 @@ static void __init octeon_irq_init_ciu(void)
1219 1191
1220 ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu"); 1192 ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
1221 if (ciu_node) { 1193 if (ciu_node) {
1222 irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); 1194 ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
1223 of_node_put(ciu_node); 1195 of_node_put(ciu_node);
1224 } else 1196 } else
1225 pr_warn("Cannot find device node for cavium,octeon-3860-ciu.\n"); 1197 panic("Cannot find device node for cavium,octeon-3860-ciu.");
1198
1199 /* CIU_0 */
1200 for (i = 0; i < 16; i++)
1201 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
1202
1203 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
1204 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);
1205
1206 for (i = 0; i < 4; i++)
1207 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
1208 for (i = 0; i < 4; i++)
1209 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
1210
1211 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
1212 for (i = 0; i < 4; i++)
1213 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
1214
1215 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
1216 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63);
1217
1218 /* CIU_1 */
1219 for (i = 0; i < 16; i++)
1220 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);
1221
1222 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
1226 1223
1227 /* Enable the CIU lines */ 1224 /* Enable the CIU lines */
1228 set_c0_status(STATUSF_IP3 | STATUSF_IP2); 1225 set_c0_status(STATUSF_IP3 | STATUSF_IP2);
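
Throughout the CIU rework above, a hardware IRQ number encodes the CIU line in the upper bits and the bit position in the low six bits (line << 6 | bit); octeon_irq_force_ciu_mapping() feeds exactly that encoding to irq_domain_associate(), and octeon_irq_gpio_map() decodes it after adding the GPIO domain's base. A small sketch of the packing, with hypothetical helper names:

    /* Hypothetical helpers spelling out the hwirq encoding used above. */
    static inline irq_hw_number_t ciu_pack_hwirq(unsigned int line, unsigned int bit)
    {
            return ((irq_hw_number_t)line << 6) | (bit & 63);
    }

    static inline void ciu_unpack_hwirq(irq_hw_number_t hw,
                                        unsigned int *line, unsigned int *bit)
    {
            *line = hw >> 6;        /* CIU register line (0 or 1) */
            *bit  = hw & 63;        /* bit within that line */
    }
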
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index 1caa78ad06d5..dde504477fac 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -393,7 +393,8 @@
393#define AR71XX_GPIO_REG_FUNC 0x28 393#define AR71XX_GPIO_REG_FUNC 0x28
394 394
395#define AR71XX_GPIO_COUNT 16 395#define AR71XX_GPIO_COUNT 16
396#define AR724X_GPIO_COUNT 18 396#define AR7240_GPIO_COUNT 18
397#define AR7241_GPIO_COUNT 20
397#define AR913X_GPIO_COUNT 22 398#define AR913X_GPIO_COUNT 22
398#define AR933X_GPIO_COUNT 30 399#define AR933X_GPIO_COUNT 30
399#define AR934X_GPIO_COUNT 23 400#define AR934X_GPIO_COUNT 23
diff --git a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
index 4476fa03bf36..6ddae926bf79 100644
--- a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
@@ -42,7 +42,6 @@
42#define cpu_has_mips64r1 0 42#define cpu_has_mips64r1 0
43#define cpu_has_mips64r2 0 43#define cpu_has_mips64r2 0
44 44
45#define cpu_has_dsp 0
46#define cpu_has_mipsmt 0 45#define cpu_has_mipsmt 0
47 46
48#define cpu_has_64bits 0 47#define cpu_has_64bits 0
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
index 7d98dbe5d4b5..c9bae1362606 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -9,6 +9,8 @@ int __init bcm63xx_spi_register(void);
9 9
10struct bcm63xx_spi_pdata { 10struct bcm63xx_spi_pdata {
11 unsigned int fifo_size; 11 unsigned int fifo_size;
12 unsigned int msg_type_shift;
13 unsigned int msg_ctl_width;
12 int bus_num; 14 int bus_num;
13 int num_chipselect; 15 int num_chipselect;
14 u32 speed_hz; 16 u32 speed_hz;
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 4ccc2a748aff..61f2a2a5099d 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -1054,7 +1054,8 @@
1054#define SPI_6338_FILL_BYTE 0x07 1054#define SPI_6338_FILL_BYTE 0x07
1055#define SPI_6338_MSG_TAIL 0x09 1055#define SPI_6338_MSG_TAIL 0x09
1056#define SPI_6338_RX_TAIL 0x0b 1056#define SPI_6338_RX_TAIL 0x0b
1057#define SPI_6338_MSG_CTL 0x40 1057#define SPI_6338_MSG_CTL 0x40 /* 8-bits register */
1058#define SPI_6338_MSG_CTL_WIDTH 8
1058#define SPI_6338_MSG_DATA 0x41 1059#define SPI_6338_MSG_DATA 0x41
1059#define SPI_6338_MSG_DATA_SIZE 0x3f 1060#define SPI_6338_MSG_DATA_SIZE 0x3f
1060#define SPI_6338_RX_DATA 0x80 1061#define SPI_6338_RX_DATA 0x80
@@ -1070,7 +1071,8 @@
1070#define SPI_6348_FILL_BYTE 0x07 1071#define SPI_6348_FILL_BYTE 0x07
1071#define SPI_6348_MSG_TAIL 0x09 1072#define SPI_6348_MSG_TAIL 0x09
1072#define SPI_6348_RX_TAIL 0x0b 1073#define SPI_6348_RX_TAIL 0x0b
1073#define SPI_6348_MSG_CTL 0x40 1074#define SPI_6348_MSG_CTL 0x40 /* 8-bits register */
1075#define SPI_6348_MSG_CTL_WIDTH 8
1074#define SPI_6348_MSG_DATA 0x41 1076#define SPI_6348_MSG_DATA 0x41
1075#define SPI_6348_MSG_DATA_SIZE 0x3f 1077#define SPI_6348_MSG_DATA_SIZE 0x3f
1076#define SPI_6348_RX_DATA 0x80 1078#define SPI_6348_RX_DATA 0x80
@@ -1078,6 +1080,7 @@
1078 1080
1079/* BCM 6358 SPI core */ 1081/* BCM 6358 SPI core */
1080#define SPI_6358_MSG_CTL 0x00 /* 16-bits register */ 1082#define SPI_6358_MSG_CTL 0x00 /* 16-bits register */
1083#define SPI_6358_MSG_CTL_WIDTH 16
1081#define SPI_6358_MSG_DATA 0x02 1084#define SPI_6358_MSG_DATA 0x02
1082#define SPI_6358_MSG_DATA_SIZE 0x21e 1085#define SPI_6358_MSG_DATA_SIZE 0x21e
1083#define SPI_6358_RX_DATA 0x400 1086#define SPI_6358_RX_DATA 0x400
@@ -1094,6 +1097,7 @@
1094 1097
1095/* BCM 6358 SPI core */ 1098/* BCM 6358 SPI core */
1096#define SPI_6368_MSG_CTL 0x00 /* 16-bits register */ 1099#define SPI_6368_MSG_CTL 0x00 /* 16-bits register */
1100#define SPI_6368_MSG_CTL_WIDTH 16
1097#define SPI_6368_MSG_DATA 0x02 1101#define SPI_6368_MSG_DATA 0x02
1098#define SPI_6368_MSG_DATA_SIZE 0x21e 1102#define SPI_6368_MSG_DATA_SIZE 0x21e
1099#define SPI_6368_RX_DATA 0x400 1103#define SPI_6368_RX_DATA 0x400
@@ -1115,7 +1119,10 @@
1115#define SPI_HD_W 0x01 1119#define SPI_HD_W 0x01
1116#define SPI_HD_R 0x02 1120#define SPI_HD_R 0x02
1117#define SPI_BYTE_CNT_SHIFT 0 1121#define SPI_BYTE_CNT_SHIFT 0
1118#define SPI_MSG_TYPE_SHIFT 14 1122#define SPI_6338_MSG_TYPE_SHIFT 6
1123#define SPI_6348_MSG_TYPE_SHIFT 6
1124#define SPI_6358_MSG_TYPE_SHIFT 14
1125#define SPI_6368_MSG_TYPE_SHIFT 14
1119 1126
1120/* Command */ 1127/* Command */
1121#define SPI_CMD_NOOP 0x00 1128#define SPI_CMD_NOOP 0x00
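
The new msg_type_shift and msg_ctl_width platform-data fields let the SPI driver handle the 8-bit MSG_CTL register on 6338/6348 versus the 16-bit one on 6358/6368, and the differing message-type shift, without compile-time constants. A hedged sketch (not the in-tree driver) of how a driver might consume those fields:

    #include <linux/io.h>
    #include <bcm63xx_dev_spi.h>

    /* Illustrative only: write MSG_CTL using the width and shift carried in
     * platform data; msg_ctl is the ioremapped register address. */
    static void spi_write_msg_ctl(void __iomem *msg_ctl,
                                  const struct bcm63xx_spi_pdata *pd,
                                  unsigned int msg_type, unsigned int byte_cnt)
    {
            u16 val = (msg_type << pd->msg_type_shift) |
                      (byte_cnt << SPI_BYTE_CNT_SHIFT);

            if (pd->msg_ctl_width == 8)
                    writeb(val, msg_ctl);   /* 6338/6348: 8-bit register */
            else
                    writew(val, msg_ctl);   /* 6358/6368: 16-bit register */
    }
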
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index 418992042f6f..c22a3078bf11 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -21,14 +21,10 @@ enum octeon_irq {
21 OCTEON_IRQ_TIMER, 21 OCTEON_IRQ_TIMER,
22/* sources in CIU_INTX_EN0 */ 22/* sources in CIU_INTX_EN0 */
23 OCTEON_IRQ_WORKQ0, 23 OCTEON_IRQ_WORKQ0,
24 OCTEON_IRQ_GPIO0 = OCTEON_IRQ_WORKQ0 + 16, 24 OCTEON_IRQ_WDOG0 = OCTEON_IRQ_WORKQ0 + 16,
25 OCTEON_IRQ_WDOG0 = OCTEON_IRQ_GPIO0 + 16,
26 OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15, 25 OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15,
27 OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16, 26 OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16,
28 OCTEON_IRQ_MBOX1, 27 OCTEON_IRQ_MBOX1,
29 OCTEON_IRQ_UART0,
30 OCTEON_IRQ_UART1,
31 OCTEON_IRQ_UART2,
32 OCTEON_IRQ_PCI_INT0, 28 OCTEON_IRQ_PCI_INT0,
33 OCTEON_IRQ_PCI_INT1, 29 OCTEON_IRQ_PCI_INT1,
34 OCTEON_IRQ_PCI_INT2, 30 OCTEON_IRQ_PCI_INT2,
@@ -38,8 +34,6 @@ enum octeon_irq {
38 OCTEON_IRQ_PCI_MSI2, 34 OCTEON_IRQ_PCI_MSI2,
39 OCTEON_IRQ_PCI_MSI3, 35 OCTEON_IRQ_PCI_MSI3,
40 36
41 OCTEON_IRQ_TWSI,
42 OCTEON_IRQ_TWSI2,
43 OCTEON_IRQ_RML, 37 OCTEON_IRQ_RML,
44 OCTEON_IRQ_TIMER0, 38 OCTEON_IRQ_TIMER0,
45 OCTEON_IRQ_TIMER1, 39 OCTEON_IRQ_TIMER1,
@@ -47,8 +41,6 @@ enum octeon_irq {
47 OCTEON_IRQ_TIMER3, 41 OCTEON_IRQ_TIMER3,
48 OCTEON_IRQ_USB0, 42 OCTEON_IRQ_USB0,
49 OCTEON_IRQ_USB1, 43 OCTEON_IRQ_USB1,
50 OCTEON_IRQ_MII0,
51 OCTEON_IRQ_MII1,
52 OCTEON_IRQ_BOOTDMA, 44 OCTEON_IRQ_BOOTDMA,
53#ifndef CONFIG_PCI_MSI 45#ifndef CONFIG_PCI_MSI
54 OCTEON_IRQ_LAST = 127 46 OCTEON_IRQ_LAST = 127
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 7531ecd654d6..dca8bce8c7ab 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -10,6 +10,7 @@ struct mod_arch_specific {
10 struct list_head dbe_list; 10 struct list_head dbe_list;
11 const struct exception_table_entry *dbe_start; 11 const struct exception_table_entry *dbe_start;
12 const struct exception_table_entry *dbe_end; 12 const struct exception_table_entry *dbe_end;
13 struct mips_hi16 *r_mips_hi16_list;
13}; 14};
14 15
15typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */ 16typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
diff --git a/arch/mips/include/asm/r4k-timer.h b/arch/mips/include/asm/r4k-timer.h
index a37d12b3b61c..afe9e0e03fe9 100644
--- a/arch/mips/include/asm/r4k-timer.h
+++ b/arch/mips/include/asm/r4k-timer.h
@@ -12,16 +12,16 @@
12 12
13#ifdef CONFIG_SYNC_R4K 13#ifdef CONFIG_SYNC_R4K
14 14
15extern void synchronise_count_master(void); 15extern void synchronise_count_master(int cpu);
16extern void synchronise_count_slave(void); 16extern void synchronise_count_slave(int cpu);
17 17
18#else 18#else
19 19
20static inline void synchronise_count_master(void) 20static inline void synchronise_count_master(int cpu)
21{ 21{
22} 22}
23 23
24static inline void synchronise_count_slave(void) 24static inline void synchronise_count_slave(int cpu)
25{ 25{
26} 26}
27 27
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index a5066b1c3de3..4f8c3cba8c0c 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -39,8 +39,6 @@ struct mips_hi16 {
39 Elf_Addr value; 39 Elf_Addr value;
40}; 40};
41 41
42static struct mips_hi16 *mips_hi16_list;
43
44static LIST_HEAD(dbe_list); 42static LIST_HEAD(dbe_list);
45static DEFINE_SPINLOCK(dbe_lock); 43static DEFINE_SPINLOCK(dbe_lock);
46 44
@@ -128,8 +126,8 @@ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
128 126
129 n->addr = (Elf_Addr *)location; 127 n->addr = (Elf_Addr *)location;
130 n->value = v; 128 n->value = v;
131 n->next = mips_hi16_list; 129 n->next = me->arch.r_mips_hi16_list;
132 mips_hi16_list = n; 130 me->arch.r_mips_hi16_list = n;
133 131
134 return 0; 132 return 0;
135} 133}
@@ -142,18 +140,28 @@ static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
142 return 0; 140 return 0;
143} 141}
144 142
143static void free_relocation_chain(struct mips_hi16 *l)
144{
145 struct mips_hi16 *next;
146
147 while (l) {
148 next = l->next;
149 kfree(l);
150 l = next;
151 }
152}
153
145static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) 154static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
146{ 155{
147 unsigned long insnlo = *location; 156 unsigned long insnlo = *location;
157 struct mips_hi16 *l;
148 Elf_Addr val, vallo; 158 Elf_Addr val, vallo;
149 159
150 /* Sign extend the addend we extract from the lo insn. */ 160 /* Sign extend the addend we extract from the lo insn. */
151 vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; 161 vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
152 162
153 if (mips_hi16_list != NULL) { 163 if (me->arch.r_mips_hi16_list != NULL) {
154 struct mips_hi16 *l; 164 l = me->arch.r_mips_hi16_list;
155
156 l = mips_hi16_list;
157 while (l != NULL) { 165 while (l != NULL) {
158 struct mips_hi16 *next; 166 struct mips_hi16 *next;
159 unsigned long insn; 167 unsigned long insn;
@@ -188,7 +196,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
188 l = next; 196 l = next;
189 } 197 }
190 198
191 mips_hi16_list = NULL; 199 me->arch.r_mips_hi16_list = NULL;
192 } 200 }
193 201
194 /* 202 /*
@@ -201,6 +209,9 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
201 return 0; 209 return 0;
202 210
203out_danger: 211out_danger:
212 free_relocation_chain(l);
213 me->arch.r_mips_hi16_list = NULL;
214
204 pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name); 215 pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);
205 216
206 return -ENOEXEC; 217 return -ENOEXEC;
@@ -273,6 +284,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
273 pr_debug("Applying relocate section %u to %u\n", relsec, 284 pr_debug("Applying relocate section %u to %u\n", relsec,
274 sechdrs[relsec].sh_info); 285 sechdrs[relsec].sh_info);
275 286
287 me->arch.r_mips_hi16_list = NULL;
276 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 288 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
277 /* This is where to make the change */ 289 /* This is where to make the change */
278 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr 290 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -296,6 +308,19 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
296 return res; 308 return res;
297 } 309 }
298 310
311 /*
312 * Normally the hi16 list should be deallocated at this point. A
313 * malformed binary however could contain a series of R_MIPS_HI16
314 * relocations not followed by a R_MIPS_LO16 relocation. In that
315 * case, free up the list and return an error.
316 */
317 if (me->arch.r_mips_hi16_list) {
318 free_relocation_chain(me->arch.r_mips_hi16_list);
319 me->arch.r_mips_hi16_list = NULL;
320
321 return -ENOEXEC;
322 }
323
299 return 0; 324 return 0;
300} 325}
301 326
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 31637d8c8738..9005bf9fb859 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -130,7 +130,7 @@ asmlinkage __cpuinit void start_secondary(void)
130 130
131 cpu_set(cpu, cpu_callin_map); 131 cpu_set(cpu, cpu_callin_map);
132 132
133 synchronise_count_slave(); 133 synchronise_count_slave(cpu);
134 134
135 /* 135 /*
136 * irq will be enabled in ->smp_finish(), enabling it too early 136 * irq will be enabled in ->smp_finish(), enabling it too early
@@ -173,7 +173,6 @@ void smp_send_stop(void)
173void __init smp_cpus_done(unsigned int max_cpus) 173void __init smp_cpus_done(unsigned int max_cpus)
174{ 174{
175 mp_ops->cpus_done(); 175 mp_ops->cpus_done();
176 synchronise_count_master();
177} 176}
178 177
179/* called from main before smp_init() */ 178/* called from main before smp_init() */
@@ -206,6 +205,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
206 while (!cpu_isset(cpu, cpu_callin_map)) 205 while (!cpu_isset(cpu, cpu_callin_map))
207 udelay(100); 206 udelay(100);
208 207
208 synchronise_count_master(cpu);
209 return 0; 209 return 0;
210} 210}
211 211
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 842d55e411fd..7f1eca3858de 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -28,12 +28,11 @@ static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
28#define COUNTON 100 28#define COUNTON 100
29#define NR_LOOPS 5 29#define NR_LOOPS 5
30 30
31void __cpuinit synchronise_count_master(void) 31void __cpuinit synchronise_count_master(int cpu)
32{ 32{
33 int i; 33 int i;
34 unsigned long flags; 34 unsigned long flags;
35 unsigned int initcount; 35 unsigned int initcount;
36 int nslaves;
37 36
38#ifdef CONFIG_MIPS_MT_SMTC 37#ifdef CONFIG_MIPS_MT_SMTC
39 /* 38 /*
@@ -43,8 +42,7 @@ void __cpuinit synchronise_count_master(void)
43 return; 42 return;
44#endif 43#endif
45 44
46 printk(KERN_INFO "Synchronize counters across %u CPUs: ", 45 printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
47 num_online_cpus());
48 46
49 local_irq_save(flags); 47 local_irq_save(flags);
50 48
@@ -52,7 +50,7 @@ void __cpuinit synchronise_count_master(void)
52 * Notify the slaves that it's time to start 50 * Notify the slaves that it's time to start
53 */ 51 */
54 atomic_set(&count_reference, read_c0_count()); 52 atomic_set(&count_reference, read_c0_count());
55 atomic_set(&count_start_flag, 1); 53 atomic_set(&count_start_flag, cpu);
56 smp_wmb(); 54 smp_wmb();
57 55
58 /* Count will be initialised to current timer for all CPU's */ 56 /* Count will be initialised to current timer for all CPU's */
@@ -69,10 +67,9 @@ void __cpuinit synchronise_count_master(void)
69 * two CPUs. 67 * two CPUs.
70 */ 68 */
71 69
72 nslaves = num_online_cpus()-1;
73 for (i = 0; i < NR_LOOPS; i++) { 70 for (i = 0; i < NR_LOOPS; i++) {
74 /* slaves loop on '!= ncpus' */ 71 /* slaves loop on '!= 2' */
75 while (atomic_read(&count_count_start) != nslaves) 72 while (atomic_read(&count_count_start) != 1)
76 mb(); 73 mb();
77 atomic_set(&count_count_stop, 0); 74 atomic_set(&count_count_stop, 0);
78 smp_wmb(); 75 smp_wmb();
@@ -89,7 +86,7 @@ void __cpuinit synchronise_count_master(void)
89 /* 86 /*
90 * Wait for all slaves to leave the synchronization point: 87 * Wait for all slaves to leave the synchronization point:
91 */ 88 */
92 while (atomic_read(&count_count_stop) != nslaves) 89 while (atomic_read(&count_count_stop) != 1)
93 mb(); 90 mb();
94 atomic_set(&count_count_start, 0); 91 atomic_set(&count_count_start, 0);
95 smp_wmb(); 92 smp_wmb();
@@ -97,6 +94,7 @@ void __cpuinit synchronise_count_master(void)
97 } 94 }
98 /* Arrange for an interrupt in a short while */ 95 /* Arrange for an interrupt in a short while */
99 write_c0_compare(read_c0_count() + COUNTON); 96 write_c0_compare(read_c0_count() + COUNTON);
97 atomic_set(&count_start_flag, 0);
100 98
101 local_irq_restore(flags); 99 local_irq_restore(flags);
102 100
@@ -108,11 +106,10 @@ void __cpuinit synchronise_count_master(void)
108 printk("done.\n"); 106 printk("done.\n");
109} 107}
110 108
111void __cpuinit synchronise_count_slave(void) 109void __cpuinit synchronise_count_slave(int cpu)
112{ 110{
113 int i; 111 int i;
114 unsigned int initcount; 112 unsigned int initcount;
115 int ncpus;
116 113
117#ifdef CONFIG_MIPS_MT_SMTC 114#ifdef CONFIG_MIPS_MT_SMTC
118 /* 115 /*
@@ -127,16 +124,15 @@ void __cpuinit synchronise_count_slave(void)
127 * so we first wait for the master to say everyone is ready 124 * so we first wait for the master to say everyone is ready
128 */ 125 */
129 126
130 while (!atomic_read(&count_start_flag)) 127 while (atomic_read(&count_start_flag) != cpu)
131 mb(); 128 mb();
132 129
133 /* Count will be initialised to next expire for all CPU's */ 130 /* Count will be initialised to next expire for all CPU's */
134 initcount = atomic_read(&count_reference); 131 initcount = atomic_read(&count_reference);
135 132
136 ncpus = num_online_cpus();
137 for (i = 0; i < NR_LOOPS; i++) { 133 for (i = 0; i < NR_LOOPS; i++) {
138 atomic_inc(&count_count_start); 134 atomic_inc(&count_count_start);
139 while (atomic_read(&count_count_start) != ncpus) 135 while (atomic_read(&count_count_start) != 2)
140 mb(); 136 mb();
141 137
142 /* 138 /*
@@ -146,7 +142,7 @@ void __cpuinit synchronise_count_slave(void)
146 write_c0_count(initcount); 142 write_c0_count(initcount);
147 143
148 atomic_inc(&count_count_stop); 144 atomic_inc(&count_count_stop);
149 while (atomic_read(&count_count_stop) != ncpus) 145 while (atomic_read(&count_count_stop) != 2)
150 mb(); 146 mb();
151 } 147 }
152 /* Arrange for an interrupt in a short while */ 148 /* Arrange for an interrupt in a short while */
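The sync-r4k.c rewrite above stops making every online CPU rendezvous at once: the boot CPU now synchronises each secondary against itself one at a time, handing the slave's CPU number through count_start_flag and letting both sides bump two counters to 1 and 2 to pace each other. Below is a rough standalone sketch, in plain C11 with pthreads, of that two-party handshake pattern; the thread model, the spin loops and the "reference counter" being copied are illustrative stand-ins, not the kernel code.

/* Two-party counter-sync rendezvous, sketched in userspace (illustrative). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_LOOPS 5

static atomic_int  start_flag;   /* master writes the slave's id here   */
static atomic_int  count_start;  /* both sides bump this to rendezvous  */
static atomic_int  count_stop;   /* ...and this one to leave each loop  */
static atomic_long reference;    /* stand-in for the CP0 count register */

static void *slave(void *arg)
{
    int cpu = (int)(long)arg;
    long init;

    while (atomic_load(&start_flag) != cpu)    /* wait to be picked */
        ;
    init = atomic_load(&reference);
    for (int i = 0; i < NR_LOOPS; i++) {
        atomic_fetch_add(&count_start, 1);
        while (atomic_load(&count_start) != 2) /* master + this slave */
            ;
        /* the real code writes init into the slave's count register here */
        atomic_fetch_add(&count_stop, 1);
        while (atomic_load(&count_stop) != 2)
            ;
    }
    printf("cpu %d adopted reference %ld\n", cpu, init);
    return NULL;
}

int main(void)
{
    pthread_t t;
    int cpu = 1;

    pthread_create(&t, NULL, slave, (void *)(long)cpu);
    atomic_store(&reference, 12345);           /* pretend counter value   */
    atomic_store(&start_flag, cpu);            /* release exactly one CPU */
    for (int i = 0; i < NR_LOOPS; i++) {
        while (atomic_load(&count_start) != 1) /* wait for the slave      */
            ;
        atomic_store(&count_stop, 0);
        atomic_fetch_add(&count_start, 1);     /* now 2: both proceed     */
        while (atomic_load(&count_stop) != 1)
            ;
        atomic_store(&count_start, 0);
        atomic_fetch_add(&count_stop, 1);
    }
    atomic_store(&start_flag, 0);              /* ready for the next CPU  */
    pthread_join(t, NULL);
    return 0;
}

Because only two parties ever meet, the loop thresholds become the constants 1 and 2 instead of num_online_cpus(), which is what the removed nslaves/ncpus variables used to hold.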
diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/mti-malta/malta-pci.c
index 284dea54faf5..2147cb34e705 100644
--- a/arch/mips/mti-malta/malta-pci.c
+++ b/arch/mips/mti-malta/malta-pci.c
@@ -252,16 +252,3 @@ void __init mips_pcibios_init(void)
252 252
253 register_pci_controller(controller); 253 register_pci_controller(controller);
254} 254}
255
256/* Enable PCI 2.1 compatibility in PIIX4 */
257static void __devinit quirk_dlcsetup(struct pci_dev *dev)
258{
259 u8 odlc, ndlc;
260 (void) pci_read_config_byte(dev, 0x82, &odlc);
261 /* Enable passive releases and delayed transaction */
262 ndlc = odlc | 7;
263 (void) pci_write_config_byte(dev, 0x82, ndlc);
264}
265
266DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
267 quirk_dlcsetup);
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 414a7459858d..86d77a666458 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -23,9 +23,12 @@
23#define AR724X_PCI_MEM_BASE 0x10000000 23#define AR724X_PCI_MEM_BASE 0x10000000
24#define AR724X_PCI_MEM_SIZE 0x08000000 24#define AR724X_PCI_MEM_SIZE 0x08000000
25 25
26#define AR724X_PCI_REG_RESET 0x18
26#define AR724X_PCI_REG_INT_STATUS 0x4c 27#define AR724X_PCI_REG_INT_STATUS 0x4c
27#define AR724X_PCI_REG_INT_MASK 0x50 28#define AR724X_PCI_REG_INT_MASK 0x50
28 29
30#define AR724X_PCI_RESET_LINK_UP BIT(0)
31
29#define AR724X_PCI_INT_DEV0 BIT(14) 32#define AR724X_PCI_INT_DEV0 BIT(14)
30 33
31#define AR724X_PCI_IRQ_COUNT 1 34#define AR724X_PCI_IRQ_COUNT 1
@@ -38,6 +41,15 @@ static void __iomem *ar724x_pci_ctrl_base;
38 41
39static u32 ar724x_pci_bar0_value; 42static u32 ar724x_pci_bar0_value;
40static bool ar724x_pci_bar0_is_cached; 43static bool ar724x_pci_bar0_is_cached;
44static bool ar724x_pci_link_up;
45
46static inline bool ar724x_pci_check_link(void)
47{
48 u32 reset;
49
50 reset = __raw_readl(ar724x_pci_ctrl_base + AR724X_PCI_REG_RESET);
51 return reset & AR724X_PCI_RESET_LINK_UP;
52}
41 53
42static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where, 54static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
43 int size, uint32_t *value) 55 int size, uint32_t *value)
@@ -46,6 +58,9 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
46 void __iomem *base; 58 void __iomem *base;
47 u32 data; 59 u32 data;
48 60
61 if (!ar724x_pci_link_up)
62 return PCIBIOS_DEVICE_NOT_FOUND;
63
49 if (devfn) 64 if (devfn)
50 return PCIBIOS_DEVICE_NOT_FOUND; 65 return PCIBIOS_DEVICE_NOT_FOUND;
51 66
@@ -96,6 +111,9 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
96 u32 data; 111 u32 data;
97 int s; 112 int s;
98 113
114 if (!ar724x_pci_link_up)
115 return PCIBIOS_DEVICE_NOT_FOUND;
116
99 if (devfn) 117 if (devfn)
100 return PCIBIOS_DEVICE_NOT_FOUND; 118 return PCIBIOS_DEVICE_NOT_FOUND;
101 119
@@ -280,6 +298,10 @@ int __init ar724x_pcibios_init(int irq)
280 if (ar724x_pci_ctrl_base == NULL) 298 if (ar724x_pci_ctrl_base == NULL)
281 goto err_unmap_devcfg; 299 goto err_unmap_devcfg;
282 300
301 ar724x_pci_link_up = ar724x_pci_check_link();
302 if (!ar724x_pci_link_up)
303 pr_warn("ar724x: PCIe link is down\n");
304
283 ar724x_pci_irq_init(irq); 305 ar724x_pci_irq_init(irq);
284 register_pci_controller(&ar724x_pci_controller); 306 register_pci_controller(&ar724x_pci_controller);
285 307
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 6c6defc24619..af9cf30ed474 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
141 141
142#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0) 142#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
143 143
144#define ATOMIC_INIT(i) ((atomic_t) { (i) }) 144#define ATOMIC_INIT(i) { (i) }
145 145
146#define smp_mb__before_atomic_dec() smp_mb() 146#define smp_mb__before_atomic_dec() smp_mb()
147#define smp_mb__after_atomic_dec() smp_mb() 147#define smp_mb__after_atomic_dec() smp_mb()
@@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
150 150
151#ifdef CONFIG_64BIT 151#ifdef CONFIG_64BIT
152 152
153#define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) 153#define ATOMIC64_INIT(i) { (i) }
154 154
155static __inline__ s64 155static __inline__ s64
156__atomic64_add_return(s64 i, atomic64_t *v) 156__atomic64_add_return(s64 i, atomic64_t *v)
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index d4b94b395c16..2c05a9292a81 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -309,7 +309,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
309 cregs->ksp = (unsigned long)stack 309 cregs->ksp = (unsigned long)stack
310 + (pregs->gr[21] & (THREAD_SIZE - 1)); 310 + (pregs->gr[21] & (THREAD_SIZE - 1));
311 cregs->gr[30] = usp; 311 cregs->gr[30] = usp;
312 if (p->personality == PER_HPUX) { 312 if (personality(p->personality) == PER_HPUX) {
313#ifdef CONFIG_HPUX 313#ifdef CONFIG_HPUX
314 cregs->kpc = (unsigned long) &hpux_child_return; 314 cregs->kpc = (unsigned long) &hpux_child_return;
315#else 315#else
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index c9b932260f47..7426e40699bd 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -225,12 +225,12 @@ long parisc_personality(unsigned long personality)
225 long err; 225 long err;
226 226
227 if (personality(current->personality) == PER_LINUX32 227 if (personality(current->personality) == PER_LINUX32
228 && personality == PER_LINUX) 228 && personality(personality) == PER_LINUX)
229 personality = PER_LINUX32; 229 personality = (personality & ~PER_MASK) | PER_LINUX32;
230 230
231 err = sys_personality(personality); 231 err = sys_personality(personality);
232 if (err == PER_LINUX32) 232 if (personality(err) == PER_LINUX32)
233 err = PER_LINUX; 233 err = (err & ~PER_MASK) | PER_LINUX;
234 234
235 return err; 235 return err;
236} 236}
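This hunk (and the identical ppc64_personality change further down in syscalls.c) stops comparing and assigning raw personality words. The personality value packs a base personality in the low PER_MASK bits together with feature flags such as ADDR_NO_RANDOMIZE, so a test like personality == PER_LINUX fails as soon as any flag is set, and assigning PER_LINUX32 outright would silently drop those flags. A small sketch of the masking idiom; the constants mirror include/uapi/linux/personality.h but are reproduced here from memory, so treat them as illustrative.

/* Why the personality word has to be masked rather than compared raw. */
#include <stdio.h>

#define PER_MASK           0x00ffUL
#define PER_LINUX          0x0000UL
#define PER_LINUX32        0x0008UL
#define ADDR_NO_RANDOMIZE  0x0040000UL

#define personality(pers)  ((pers) & PER_MASK)

int main(void)
{
    unsigned long pers = PER_LINUX | ADDR_NO_RANDOMIZE;

    /* Old check: fails because the flag bit makes the word != PER_LINUX. */
    printf("raw compare matches:    %d\n", pers == PER_LINUX);

    /* New check: looks only at the base personality. */
    printf("masked compare matches: %d\n", personality(pers) == PER_LINUX);

    /* New assignment: switch the base to PER_LINUX32, keep the flags. */
    pers = (pers & ~PER_MASK) | PER_LINUX32;
    printf("flags preserved:        %d\n", (pers & ADDR_NO_RANDOMIZE) != 0);
    return 0;
}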
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
index 8d35d2c1f694..4f9c9f682ecf 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -345,6 +345,13 @@
345/include/ "qoriq-duart-1.dtsi" 345/include/ "qoriq-duart-1.dtsi"
346/include/ "qoriq-gpio-0.dtsi" 346/include/ "qoriq-gpio-0.dtsi"
347/include/ "qoriq-usb2-mph-0.dtsi" 347/include/ "qoriq-usb2-mph-0.dtsi"
348 usb@210000 {
349 compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
350 port0;
351 };
348/include/ "qoriq-usb2-dr-0.dtsi" 352/include/ "qoriq-usb2-dr-0.dtsi"
353 usb@211000 {
354 compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
355 };
349/include/ "qoriq-sec4.0-0.dtsi" 356/include/ "qoriq-sec4.0-0.dtsi"
350}; 357};
diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023rds_defconfig
index f4337bacd0e7..26e541c4662b 100644
--- a/arch/powerpc/configs/85xx/p1023rds_defconfig
+++ b/arch/powerpc/configs/85xx/p1023rds_defconfig
@@ -6,28 +6,27 @@ CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y 6CONFIG_POSIX_MQUEUE=y
7CONFIG_BSD_PROCESS_ACCT=y 7CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_AUDIT=y 8CONFIG_AUDIT=y
9CONFIG_SPARSE_IRQ=y 9CONFIG_IRQ_DOMAIN_DEBUG=y
10CONFIG_NO_HZ=y
11CONFIG_HIGH_RES_TIMERS=y
10CONFIG_IKCONFIG=y 12CONFIG_IKCONFIG=y
11CONFIG_IKCONFIG_PROC=y 13CONFIG_IKCONFIG_PROC=y
12CONFIG_LOG_BUF_SHIFT=14 14CONFIG_LOG_BUF_SHIFT=14
13CONFIG_BLK_DEV_INITRD=y 15CONFIG_BLK_DEV_INITRD=y
14# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
15CONFIG_KALLSYMS_ALL=y 16CONFIG_KALLSYMS_ALL=y
16CONFIG_KALLSYMS_EXTRA_PASS=y
17CONFIG_EMBEDDED=y 17CONFIG_EMBEDDED=y
18CONFIG_MODULES=y 18CONFIG_MODULES=y
19CONFIG_MODULE_UNLOAD=y 19CONFIG_MODULE_UNLOAD=y
20CONFIG_MODULE_FORCE_UNLOAD=y 20CONFIG_MODULE_FORCE_UNLOAD=y
21CONFIG_MODVERSIONS=y 21CONFIG_MODVERSIONS=y
22# CONFIG_BLK_DEV_BSG is not set 22# CONFIG_BLK_DEV_BSG is not set
23CONFIG_PARTITION_ADVANCED=y
24CONFIG_MAC_PARTITION=y
23CONFIG_P1023_RDS=y 25CONFIG_P1023_RDS=y
24CONFIG_QUICC_ENGINE=y 26CONFIG_QUICC_ENGINE=y
25CONFIG_QE_GPIO=y 27CONFIG_QE_GPIO=y
26CONFIG_CPM2=y 28CONFIG_CPM2=y
27CONFIG_GPIO_MPC8XXX=y
28CONFIG_HIGHMEM=y 29CONFIG_HIGHMEM=y
29CONFIG_NO_HZ=y
30CONFIG_HIGH_RES_TIMERS=y
31# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 30# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
32CONFIG_BINFMT_MISC=m 31CONFIG_BINFMT_MISC=m
33CONFIG_MATH_EMULATION=y 32CONFIG_MATH_EMULATION=y
@@ -63,11 +62,11 @@ CONFIG_INET_ESP=y
63CONFIG_IPV6=y 62CONFIG_IPV6=y
64CONFIG_IP_SCTP=m 63CONFIG_IP_SCTP=m
65CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 64CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
65CONFIG_DEVTMPFS=y
66CONFIG_PROC_DEVICETREE=y 66CONFIG_PROC_DEVICETREE=y
67CONFIG_BLK_DEV_LOOP=y 67CONFIG_BLK_DEV_LOOP=y
68CONFIG_BLK_DEV_RAM=y 68CONFIG_BLK_DEV_RAM=y
69CONFIG_BLK_DEV_RAM_SIZE=131072 69CONFIG_BLK_DEV_RAM_SIZE=131072
70CONFIG_MISC_DEVICES=y
71CONFIG_EEPROM_LEGACY=y 70CONFIG_EEPROM_LEGACY=y
72CONFIG_BLK_DEV_SD=y 71CONFIG_BLK_DEV_SD=y
73CONFIG_CHR_DEV_ST=y 72CONFIG_CHR_DEV_ST=y
@@ -80,15 +79,14 @@ CONFIG_SATA_FSL=y
80CONFIG_SATA_SIL24=y 79CONFIG_SATA_SIL24=y
81CONFIG_NETDEVICES=y 80CONFIG_NETDEVICES=y
82CONFIG_DUMMY=y 81CONFIG_DUMMY=y
82CONFIG_FS_ENET=y
83CONFIG_FSL_PQ_MDIO=y
84CONFIG_E1000E=y
83CONFIG_MARVELL_PHY=y 85CONFIG_MARVELL_PHY=y
84CONFIG_DAVICOM_PHY=y 86CONFIG_DAVICOM_PHY=y
85CONFIG_CICADA_PHY=y 87CONFIG_CICADA_PHY=y
86CONFIG_VITESSE_PHY=y 88CONFIG_VITESSE_PHY=y
87CONFIG_FIXED_PHY=y 89CONFIG_FIXED_PHY=y
88CONFIG_NET_ETHERNET=y
89CONFIG_FS_ENET=y
90CONFIG_E1000E=y
91CONFIG_FSL_PQ_MDIO=y
92CONFIG_INPUT_FF_MEMLESS=m 90CONFIG_INPUT_FF_MEMLESS=m
93# CONFIG_INPUT_MOUSEDEV is not set 91# CONFIG_INPUT_MOUSEDEV is not set
94# CONFIG_INPUT_KEYBOARD is not set 92# CONFIG_INPUT_KEYBOARD is not set
@@ -98,16 +96,15 @@ CONFIG_SERIAL_8250=y
98CONFIG_SERIAL_8250_CONSOLE=y 96CONFIG_SERIAL_8250_CONSOLE=y
99CONFIG_SERIAL_8250_NR_UARTS=2 97CONFIG_SERIAL_8250_NR_UARTS=2
100CONFIG_SERIAL_8250_RUNTIME_UARTS=2 98CONFIG_SERIAL_8250_RUNTIME_UARTS=2
101CONFIG_SERIAL_8250_EXTENDED=y
102CONFIG_SERIAL_8250_MANY_PORTS=y 99CONFIG_SERIAL_8250_MANY_PORTS=y
103CONFIG_SERIAL_8250_DETECT_IRQ=y 100CONFIG_SERIAL_8250_DETECT_IRQ=y
104CONFIG_SERIAL_8250_RSA=y 101CONFIG_SERIAL_8250_RSA=y
105CONFIG_SERIAL_QE=m 102CONFIG_SERIAL_QE=m
106CONFIG_HW_RANDOM=y
107CONFIG_NVRAM=y 103CONFIG_NVRAM=y
108CONFIG_I2C=y 104CONFIG_I2C=y
109CONFIG_I2C_CPM=m 105CONFIG_I2C_CPM=m
110CONFIG_I2C_MPC=y 106CONFIG_I2C_MPC=y
107CONFIG_GPIO_MPC8XXX=y
111# CONFIG_HWMON is not set 108# CONFIG_HWMON is not set
112CONFIG_VIDEO_OUTPUT_CONTROL=y 109CONFIG_VIDEO_OUTPUT_CONTROL=y
113CONFIG_SOUND=y 110CONFIG_SOUND=y
@@ -123,7 +120,6 @@ CONFIG_DMADEVICES=y
123CONFIG_FSL_DMA=y 120CONFIG_FSL_DMA=y
124# CONFIG_NET_DMA is not set 121# CONFIG_NET_DMA is not set
125CONFIG_STAGING=y 122CONFIG_STAGING=y
126# CONFIG_STAGING_EXCLUDE_BUILD is not set
127CONFIG_EXT2_FS=y 123CONFIG_EXT2_FS=y
128CONFIG_EXT3_FS=y 124CONFIG_EXT3_FS=y
129# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 125# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -150,22 +146,15 @@ CONFIG_QNX4FS_FS=m
150CONFIG_SYSV_FS=m 146CONFIG_SYSV_FS=m
151CONFIG_UFS_FS=m 147CONFIG_UFS_FS=m
152CONFIG_NFS_FS=y 148CONFIG_NFS_FS=y
153CONFIG_NFS_V3=y
154CONFIG_NFS_V4=y 149CONFIG_NFS_V4=y
155CONFIG_ROOT_NFS=y 150CONFIG_ROOT_NFS=y
156CONFIG_NFSD=y 151CONFIG_NFSD=y
157CONFIG_PARTITION_ADVANCED=y
158CONFIG_MAC_PARTITION=y
159CONFIG_CRC_T10DIF=y 152CONFIG_CRC_T10DIF=y
160CONFIG_FRAME_WARN=8092 153CONFIG_FRAME_WARN=8092
161CONFIG_DEBUG_FS=y 154CONFIG_DEBUG_FS=y
162CONFIG_DEBUG_KERNEL=y
163CONFIG_DETECT_HUNG_TASK=y 155CONFIG_DETECT_HUNG_TASK=y
164# CONFIG_DEBUG_BUGVERBOSE is not set 156# CONFIG_DEBUG_BUGVERBOSE is not set
165CONFIG_DEBUG_INFO=y 157CONFIG_DEBUG_INFO=y
166# CONFIG_RCU_CPU_STALL_DETECTOR is not set
167CONFIG_SYSCTL_SYSCALL_CHECK=y
168CONFIG_IRQ_DOMAIN_DEBUG=y
169CONFIG_CRYPTO_PCBC=m 158CONFIG_CRYPTO_PCBC=m
170CONFIG_CRYPTO_SHA256=y 159CONFIG_CRYPTO_SHA256=y
171CONFIG_CRYPTO_SHA512=y 160CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index cbb98c1234fd..8b3d57c1ebe8 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -6,8 +6,8 @@ CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y 6CONFIG_POSIX_MQUEUE=y
7CONFIG_BSD_PROCESS_ACCT=y 7CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_AUDIT=y 8CONFIG_AUDIT=y
9CONFIG_SPARSE_IRQ=y 9CONFIG_NO_HZ=y
10CONFIG_RCU_TRACE=y 10CONFIG_HIGH_RES_TIMERS=y
11CONFIG_IKCONFIG=y 11CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y 12CONFIG_IKCONFIG_PROC=y
13CONFIG_LOG_BUF_SHIFT=14 13CONFIG_LOG_BUF_SHIFT=14
@@ -21,23 +21,22 @@ CONFIG_MODULE_UNLOAD=y
21CONFIG_MODULE_FORCE_UNLOAD=y 21CONFIG_MODULE_FORCE_UNLOAD=y
22CONFIG_MODVERSIONS=y 22CONFIG_MODVERSIONS=y
23# CONFIG_BLK_DEV_BSG is not set 23# CONFIG_BLK_DEV_BSG is not set
24CONFIG_PARTITION_ADVANCED=y
25CONFIG_MAC_PARTITION=y
24CONFIG_P2041_RDB=y 26CONFIG_P2041_RDB=y
25CONFIG_P3041_DS=y 27CONFIG_P3041_DS=y
26CONFIG_P4080_DS=y 28CONFIG_P4080_DS=y
27CONFIG_P5020_DS=y 29CONFIG_P5020_DS=y
28CONFIG_HIGHMEM=y 30CONFIG_HIGHMEM=y
29CONFIG_NO_HZ=y
30CONFIG_HIGH_RES_TIMERS=y
31# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 31# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
32CONFIG_BINFMT_MISC=m 32CONFIG_BINFMT_MISC=m
33CONFIG_KEXEC=y 33CONFIG_KEXEC=y
34CONFIG_IRQ_ALL_CPUS=y 34CONFIG_IRQ_ALL_CPUS=y
35CONFIG_FORCE_MAX_ZONEORDER=13 35CONFIG_FORCE_MAX_ZONEORDER=13
36CONFIG_FSL_LBC=y
37CONFIG_PCI=y 36CONFIG_PCI=y
38CONFIG_PCIEPORTBUS=y 37CONFIG_PCIEPORTBUS=y
39CONFIG_PCI_MSI=y
40# CONFIG_PCIEASPM is not set 38# CONFIG_PCIEASPM is not set
39CONFIG_PCI_MSI=y
41CONFIG_RAPIDIO=y 40CONFIG_RAPIDIO=y
42CONFIG_FSL_RIO=y 41CONFIG_FSL_RIO=y
43CONFIG_NET=y 42CONFIG_NET=y
@@ -70,6 +69,7 @@ CONFIG_INET_IPCOMP=y
70CONFIG_IPV6=y 69CONFIG_IPV6=y
71CONFIG_IP_SCTP=m 70CONFIG_IP_SCTP=m
72CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 71CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
72CONFIG_DEVTMPFS=y
73CONFIG_MTD=y 73CONFIG_MTD=y
74CONFIG_MTD_CMDLINE_PARTS=y 74CONFIG_MTD_CMDLINE_PARTS=y
75CONFIG_MTD_CHAR=y 75CONFIG_MTD_CHAR=y
@@ -77,17 +77,14 @@ CONFIG_MTD_BLOCK=y
77CONFIG_MTD_CFI=y 77CONFIG_MTD_CFI=y
78CONFIG_MTD_CFI_AMDSTD=y 78CONFIG_MTD_CFI_AMDSTD=y
79CONFIG_MTD_PHYSMAP_OF=y 79CONFIG_MTD_PHYSMAP_OF=y
80CONFIG_MTD_M25P80=y
80CONFIG_MTD_NAND=y 81CONFIG_MTD_NAND=y
81CONFIG_MTD_NAND_ECC=y
82CONFIG_MTD_NAND_IDS=y
83CONFIG_MTD_NAND_FSL_IFC=y
84CONFIG_MTD_NAND_FSL_ELBC=y 82CONFIG_MTD_NAND_FSL_ELBC=y
85CONFIG_MTD_M25P80=y 83CONFIG_MTD_NAND_FSL_IFC=y
86CONFIG_PROC_DEVICETREE=y 84CONFIG_PROC_DEVICETREE=y
87CONFIG_BLK_DEV_LOOP=y 85CONFIG_BLK_DEV_LOOP=y
88CONFIG_BLK_DEV_RAM=y 86CONFIG_BLK_DEV_RAM=y
89CONFIG_BLK_DEV_RAM_SIZE=131072 87CONFIG_BLK_DEV_RAM_SIZE=131072
90CONFIG_MISC_DEVICES=y
91CONFIG_BLK_DEV_SD=y 88CONFIG_BLK_DEV_SD=y
92CONFIG_CHR_DEV_ST=y 89CONFIG_CHR_DEV_ST=y
93CONFIG_BLK_DEV_SR=y 90CONFIG_BLK_DEV_SR=y
@@ -115,11 +112,9 @@ CONFIG_SERIO_LIBPS2=y
115CONFIG_PPC_EPAPR_HV_BYTECHAN=y 112CONFIG_PPC_EPAPR_HV_BYTECHAN=y
116CONFIG_SERIAL_8250=y 113CONFIG_SERIAL_8250=y
117CONFIG_SERIAL_8250_CONSOLE=y 114CONFIG_SERIAL_8250_CONSOLE=y
118CONFIG_SERIAL_8250_EXTENDED=y
119CONFIG_SERIAL_8250_MANY_PORTS=y 115CONFIG_SERIAL_8250_MANY_PORTS=y
120CONFIG_SERIAL_8250_DETECT_IRQ=y 116CONFIG_SERIAL_8250_DETECT_IRQ=y
121CONFIG_SERIAL_8250_RSA=y 117CONFIG_SERIAL_8250_RSA=y
122CONFIG_HW_RANDOM=y
123CONFIG_NVRAM=y 118CONFIG_NVRAM=y
124CONFIG_I2C=y 119CONFIG_I2C=y
125CONFIG_I2C_CHARDEV=y 120CONFIG_I2C_CHARDEV=y
@@ -132,7 +127,6 @@ CONFIG_SPI_FSL_ESPI=y
132CONFIG_VIDEO_OUTPUT_CONTROL=y 127CONFIG_VIDEO_OUTPUT_CONTROL=y
133CONFIG_USB_HID=m 128CONFIG_USB_HID=m
134CONFIG_USB=y 129CONFIG_USB=y
135CONFIG_USB_DEVICEFS=y
136CONFIG_USB_MON=y 130CONFIG_USB_MON=y
137CONFIG_USB_EHCI_HCD=y 131CONFIG_USB_EHCI_HCD=y
138CONFIG_USB_EHCI_FSL=y 132CONFIG_USB_EHCI_FSL=y
@@ -142,8 +136,6 @@ CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
142CONFIG_USB_STORAGE=y 136CONFIG_USB_STORAGE=y
143CONFIG_MMC=y 137CONFIG_MMC=y
144CONFIG_MMC_SDHCI=y 138CONFIG_MMC_SDHCI=y
145CONFIG_MMC_SDHCI_OF=y
146CONFIG_MMC_SDHCI_OF_ESDHC=y
147CONFIG_EDAC=y 139CONFIG_EDAC=y
148CONFIG_EDAC_MM_EDAC=y 140CONFIG_EDAC_MM_EDAC=y
149CONFIG_EDAC_MPC85XX=y 141CONFIG_EDAC_MPC85XX=y
@@ -170,19 +162,16 @@ CONFIG_HUGETLBFS=y
170CONFIG_JFFS2_FS=y 162CONFIG_JFFS2_FS=y
171CONFIG_CRAMFS=y 163CONFIG_CRAMFS=y
172CONFIG_NFS_FS=y 164CONFIG_NFS_FS=y
173CONFIG_NFS_V3=y
174CONFIG_NFS_V4=y 165CONFIG_NFS_V4=y
175CONFIG_ROOT_NFS=y 166CONFIG_ROOT_NFS=y
176CONFIG_NFSD=m 167CONFIG_NFSD=m
177CONFIG_PARTITION_ADVANCED=y
178CONFIG_MAC_PARTITION=y
179CONFIG_NLS_ISO8859_1=y 168CONFIG_NLS_ISO8859_1=y
180CONFIG_NLS_UTF8=m 169CONFIG_NLS_UTF8=m
181CONFIG_MAGIC_SYSRQ=y 170CONFIG_MAGIC_SYSRQ=y
182CONFIG_DEBUG_SHIRQ=y 171CONFIG_DEBUG_SHIRQ=y
183CONFIG_DETECT_HUNG_TASK=y 172CONFIG_DETECT_HUNG_TASK=y
184CONFIG_DEBUG_INFO=y 173CONFIG_DEBUG_INFO=y
185CONFIG_SYSCTL_SYSCALL_CHECK=y 174CONFIG_RCU_TRACE=y
186CONFIG_CRYPTO_NULL=y 175CONFIG_CRYPTO_NULL=y
187CONFIG_CRYPTO_PCBC=m 176CONFIG_CRYPTO_PCBC=m
188CONFIG_CRYPTO_MD4=y 177CONFIG_CRYPTO_MD4=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index dd89de8b0b7f..0516e22ca3de 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -56,6 +56,7 @@ CONFIG_INET_ESP=y
56CONFIG_IPV6=y 56CONFIG_IPV6=y
57CONFIG_IP_SCTP=m 57CONFIG_IP_SCTP=m
58CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 58CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
59CONFIG_DEVTMPFS=y
59CONFIG_MTD=y 60CONFIG_MTD=y
60CONFIG_MTD_CMDLINE_PARTS=y 61CONFIG_MTD_CMDLINE_PARTS=y
61CONFIG_MTD_CHAR=y 62CONFIG_MTD_CHAR=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 15130066e5e2..07b7f2af2dca 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -1,8 +1,10 @@
1CONFIG_PPC64=y
2CONFIG_ALTIVEC=y
3CONFIG_SMP=y
4CONFIG_NR_CPUS=4
1CONFIG_EXPERIMENTAL=y 5CONFIG_EXPERIMENTAL=y
2CONFIG_SYSVIPC=y 6CONFIG_SYSVIPC=y
3CONFIG_POSIX_MQUEUE=y 7CONFIG_POSIX_MQUEUE=y
4CONFIG_NO_HZ=y
5CONFIG_HIGH_RES_TIMERS=y
6CONFIG_IKCONFIG=y 8CONFIG_IKCONFIG=y
7CONFIG_IKCONFIG_PROC=y 9CONFIG_IKCONFIG_PROC=y
8CONFIG_BLK_DEV_INITRD=y 10CONFIG_BLK_DEV_INITRD=y
@@ -13,15 +15,16 @@ CONFIG_MODULES=y
13CONFIG_MODULE_UNLOAD=y 15CONFIG_MODULE_UNLOAD=y
14CONFIG_MODVERSIONS=y 16CONFIG_MODVERSIONS=y
15CONFIG_MODULE_SRCVERSION_ALL=y 17CONFIG_MODULE_SRCVERSION_ALL=y
16CONFIG_PARTITION_ADVANCED=y 18# CONFIG_PPC_PSERIES is not set
17CONFIG_MAC_PARTITION=y
18CONFIG_SMP=y
19CONFIG_NR_CPUS=4
20CONFIG_KEXEC=y
21# CONFIG_RELOCATABLE is not set
22CONFIG_CPU_FREQ=y 19CONFIG_CPU_FREQ=y
23CONFIG_CPU_FREQ_GOV_POWERSAVE=y 20CONFIG_CPU_FREQ_GOV_POWERSAVE=y
24CONFIG_CPU_FREQ_GOV_USERSPACE=y 21CONFIG_CPU_FREQ_GOV_USERSPACE=y
22CONFIG_CPU_FREQ_PMAC64=y
23CONFIG_NO_HZ=y
24CONFIG_HIGH_RES_TIMERS=y
25CONFIG_KEXEC=y
26CONFIG_IRQ_ALL_CPUS=y
27# CONFIG_MIGRATION is not set
25CONFIG_PCI_MSI=y 28CONFIG_PCI_MSI=y
26CONFIG_NET=y 29CONFIG_NET=y
27CONFIG_PACKET=y 30CONFIG_PACKET=y
@@ -49,6 +52,7 @@ CONFIG_NF_CT_NETLINK=m
49CONFIG_NF_CONNTRACK_IPV4=m 52CONFIG_NF_CONNTRACK_IPV4=m
50CONFIG_IP_NF_QUEUE=m 53CONFIG_IP_NF_QUEUE=m
51CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 54CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
55CONFIG_PROC_DEVICETREE=y
52CONFIG_BLK_DEV_LOOP=y 56CONFIG_BLK_DEV_LOOP=y
53CONFIG_BLK_DEV_NBD=m 57CONFIG_BLK_DEV_NBD=m
54CONFIG_BLK_DEV_RAM=y 58CONFIG_BLK_DEV_RAM=y
@@ -56,6 +60,8 @@ CONFIG_BLK_DEV_RAM_SIZE=65536
56CONFIG_CDROM_PKTCDVD=m 60CONFIG_CDROM_PKTCDVD=m
57CONFIG_IDE=y 61CONFIG_IDE=y
58CONFIG_BLK_DEV_IDECD=y 62CONFIG_BLK_DEV_IDECD=y
63CONFIG_BLK_DEV_IDE_PMAC=y
64CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
59CONFIG_BLK_DEV_SD=y 65CONFIG_BLK_DEV_SD=y
60CONFIG_CHR_DEV_ST=y 66CONFIG_CHR_DEV_ST=y
61CONFIG_BLK_DEV_SR=y 67CONFIG_BLK_DEV_SR=y
@@ -79,24 +85,33 @@ CONFIG_DM_CRYPT=m
79CONFIG_DM_SNAPSHOT=m 85CONFIG_DM_SNAPSHOT=m
80CONFIG_DM_MIRROR=m 86CONFIG_DM_MIRROR=m
81CONFIG_DM_ZERO=m 87CONFIG_DM_ZERO=m
82CONFIG_MACINTOSH_DRIVERS=y 88CONFIG_IEEE1394=y
89CONFIG_IEEE1394_OHCI1394=y
90CONFIG_IEEE1394_SBP2=m
91CONFIG_IEEE1394_ETH1394=m
92CONFIG_IEEE1394_RAWIO=y
93CONFIG_IEEE1394_VIDEO1394=m
94CONFIG_IEEE1394_DV1394=m
95CONFIG_ADB_PMU=y
96CONFIG_PMAC_SMU=y
83CONFIG_MAC_EMUMOUSEBTN=y 97CONFIG_MAC_EMUMOUSEBTN=y
98CONFIG_THERM_PM72=y
99CONFIG_WINDFARM=y
100CONFIG_WINDFARM_PM81=y
101CONFIG_WINDFARM_PM91=y
102CONFIG_WINDFARM_PM112=y
103CONFIG_WINDFARM_PM121=y
84CONFIG_NETDEVICES=y 104CONFIG_NETDEVICES=y
85CONFIG_BONDING=m
86CONFIG_DUMMY=m 105CONFIG_DUMMY=m
87CONFIG_MII=y 106CONFIG_BONDING=m
88CONFIG_TUN=m 107CONFIG_TUN=m
108CONFIG_NET_ETHERNET=y
109CONFIG_MII=y
110CONFIG_SUNGEM=y
89CONFIG_ACENIC=m 111CONFIG_ACENIC=m
90CONFIG_ACENIC_OMIT_TIGON_I=y 112CONFIG_ACENIC_OMIT_TIGON_I=y
91CONFIG_TIGON3=y
92CONFIG_E1000=y 113CONFIG_E1000=y
93CONFIG_SUNGEM=y 114CONFIG_TIGON3=y
94CONFIG_PPP=m
95CONFIG_PPP_BSDCOMP=m
96CONFIG_PPP_DEFLATE=m
97CONFIG_PPPOE=m
98CONFIG_PPP_ASYNC=m
99CONFIG_PPP_SYNC_TTY=m
100CONFIG_USB_CATC=m 115CONFIG_USB_CATC=m
101CONFIG_USB_KAWETH=m 116CONFIG_USB_KAWETH=m
102CONFIG_USB_PEGASUS=m 117CONFIG_USB_PEGASUS=m
@@ -106,24 +121,36 @@ CONFIG_USB_USBNET=m
106# CONFIG_USB_NET_NET1080 is not set 121# CONFIG_USB_NET_NET1080 is not set
107# CONFIG_USB_NET_CDC_SUBSET is not set 122# CONFIG_USB_NET_CDC_SUBSET is not set
108# CONFIG_USB_NET_ZAURUS is not set 123# CONFIG_USB_NET_ZAURUS is not set
124CONFIG_PPP=m
125CONFIG_PPP_ASYNC=m
126CONFIG_PPP_SYNC_TTY=m
127CONFIG_PPP_DEFLATE=m
128CONFIG_PPP_BSDCOMP=m
129CONFIG_PPPOE=m
109# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 130# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
110CONFIG_INPUT_JOYDEV=m 131CONFIG_INPUT_JOYDEV=m
111CONFIG_INPUT_EVDEV=y 132CONFIG_INPUT_EVDEV=y
133# CONFIG_KEYBOARD_ATKBD is not set
112# CONFIG_MOUSE_PS2 is not set 134# CONFIG_MOUSE_PS2 is not set
135# CONFIG_SERIO_I8042 is not set
113# CONFIG_SERIO_SERPORT is not set 136# CONFIG_SERIO_SERPORT is not set
114CONFIG_VT_HW_CONSOLE_BINDING=y
115# CONFIG_HW_RANDOM is not set 137# CONFIG_HW_RANDOM is not set
116CONFIG_GEN_RTC=y 138CONFIG_GEN_RTC=y
117CONFIG_RAW_DRIVER=y 139CONFIG_RAW_DRIVER=y
118CONFIG_I2C_CHARDEV=y 140CONFIG_I2C_CHARDEV=y
119# CONFIG_HWMON is not set 141# CONFIG_HWMON is not set
120CONFIG_AGP=y 142CONFIG_AGP=m
121CONFIG_DRM=y 143CONFIG_AGP_UNINORTH=m
122CONFIG_DRM_NOUVEAU=y
123CONFIG_VIDEO_OUTPUT_CONTROL=m 144CONFIG_VIDEO_OUTPUT_CONTROL=m
145CONFIG_FB=y
124CONFIG_FIRMWARE_EDID=y 146CONFIG_FIRMWARE_EDID=y
125CONFIG_FB_TILEBLITTING=y 147CONFIG_FB_TILEBLITTING=y
148CONFIG_FB_OF=y
149CONFIG_FB_NVIDIA=y
150CONFIG_FB_NVIDIA_I2C=y
126CONFIG_FB_RADEON=y 151CONFIG_FB_RADEON=y
152# CONFIG_VGA_CONSOLE is not set
153CONFIG_FRAMEBUFFER_CONSOLE=y
127CONFIG_LOGO=y 154CONFIG_LOGO=y
128CONFIG_SOUND=m 155CONFIG_SOUND=m
129CONFIG_SND=m 156CONFIG_SND=m
@@ -131,7 +158,15 @@ CONFIG_SND_SEQUENCER=m
131CONFIG_SND_MIXER_OSS=m 158CONFIG_SND_MIXER_OSS=m
132CONFIG_SND_PCM_OSS=m 159CONFIG_SND_PCM_OSS=m
133CONFIG_SND_SEQUENCER_OSS=y 160CONFIG_SND_SEQUENCER_OSS=y
161CONFIG_SND_POWERMAC=m
162CONFIG_SND_AOA=m
163CONFIG_SND_AOA_FABRIC_LAYOUT=m
164CONFIG_SND_AOA_ONYX=m
165CONFIG_SND_AOA_TAS=m
166CONFIG_SND_AOA_TOONIE=m
134CONFIG_SND_USB_AUDIO=m 167CONFIG_SND_USB_AUDIO=m
168CONFIG_HID_PID=y
169CONFIG_USB_HIDDEV=y
135CONFIG_HID_GYRATION=y 170CONFIG_HID_GYRATION=y
136CONFIG_LOGITECH_FF=y 171CONFIG_LOGITECH_FF=y
137CONFIG_HID_PANTHERLORD=y 172CONFIG_HID_PANTHERLORD=y
@@ -139,12 +174,13 @@ CONFIG_HID_PETALYNX=y
139CONFIG_HID_SAMSUNG=y 174CONFIG_HID_SAMSUNG=y
140CONFIG_HID_SONY=y 175CONFIG_HID_SONY=y
141CONFIG_HID_SUNPLUS=y 176CONFIG_HID_SUNPLUS=y
142CONFIG_HID_PID=y
143CONFIG_USB_HIDDEV=y
144CONFIG_USB=y 177CONFIG_USB=y
178CONFIG_USB_DEVICEFS=y
145CONFIG_USB_MON=y 179CONFIG_USB_MON=y
146CONFIG_USB_EHCI_HCD=y 180CONFIG_USB_EHCI_HCD=y
181# CONFIG_USB_EHCI_HCD_PPC_OF is not set
147CONFIG_USB_OHCI_HCD=y 182CONFIG_USB_OHCI_HCD=y
183CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
148CONFIG_USB_ACM=m 184CONFIG_USB_ACM=m
149CONFIG_USB_PRINTER=y 185CONFIG_USB_PRINTER=y
150CONFIG_USB_STORAGE=y 186CONFIG_USB_STORAGE=y
@@ -208,6 +244,8 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
208CONFIG_REISERFS_FS_SECURITY=y 244CONFIG_REISERFS_FS_SECURITY=y
209CONFIG_XFS_FS=m 245CONFIG_XFS_FS=m
210CONFIG_XFS_POSIX_ACL=y 246CONFIG_XFS_POSIX_ACL=y
247CONFIG_INOTIFY=y
248CONFIG_AUTOFS_FS=m
211CONFIG_ISO9660_FS=y 249CONFIG_ISO9660_FS=y
212CONFIG_JOLIET=y 250CONFIG_JOLIET=y
213CONFIG_ZISOFS=y 251CONFIG_ZISOFS=y
@@ -221,12 +259,14 @@ CONFIG_HFS_FS=m
221CONFIG_HFSPLUS_FS=m 259CONFIG_HFSPLUS_FS=m
222CONFIG_CRAMFS=y 260CONFIG_CRAMFS=y
223CONFIG_NFS_FS=y 261CONFIG_NFS_FS=y
262CONFIG_NFS_V3=y
224CONFIG_NFS_V3_ACL=y 263CONFIG_NFS_V3_ACL=y
225CONFIG_NFS_V4=y 264CONFIG_NFS_V4=y
226CONFIG_NFSD=y 265CONFIG_NFSD=y
227CONFIG_NFSD_V3_ACL=y 266CONFIG_NFSD_V3_ACL=y
228CONFIG_NFSD_V4=y 267CONFIG_NFSD_V4=y
229CONFIG_CIFS=m 268CONFIG_CIFS=m
269CONFIG_PARTITION_ADVANCED=y
230CONFIG_NLS_CODEPAGE_437=y 270CONFIG_NLS_CODEPAGE_437=y
231CONFIG_NLS_CODEPAGE_1250=y 271CONFIG_NLS_CODEPAGE_1250=y
232CONFIG_NLS_CODEPAGE_1251=y 272CONFIG_NLS_CODEPAGE_1251=y
@@ -234,23 +274,29 @@ CONFIG_NLS_ASCII=y
234CONFIG_NLS_ISO8859_1=y 274CONFIG_NLS_ISO8859_1=y
235CONFIG_NLS_ISO8859_15=y 275CONFIG_NLS_ISO8859_15=y
236CONFIG_NLS_UTF8=y 276CONFIG_NLS_UTF8=y
277CONFIG_CRC_T10DIF=y
278CONFIG_LIBCRC32C=m
237CONFIG_MAGIC_SYSRQ=y 279CONFIG_MAGIC_SYSRQ=y
238# CONFIG_UNUSED_SYMBOLS is not set
239CONFIG_DEBUG_FS=y 280CONFIG_DEBUG_FS=y
240CONFIG_DEBUG_KERNEL=y 281CONFIG_DEBUG_KERNEL=y
241CONFIG_DEBUG_MUTEXES=y 282CONFIG_DEBUG_MUTEXES=y
283# CONFIG_RCU_CPU_STALL_DETECTOR is not set
242CONFIG_LATENCYTOP=y 284CONFIG_LATENCYTOP=y
243CONFIG_STRICT_DEVMEM=y 285CONFIG_SYSCTL_SYSCALL_CHECK=y
286CONFIG_BOOTX_TEXT=y
244CONFIG_CRYPTO_NULL=m 287CONFIG_CRYPTO_NULL=m
245CONFIG_CRYPTO_TEST=m 288CONFIG_CRYPTO_TEST=m
289CONFIG_CRYPTO_ECB=m
246CONFIG_CRYPTO_PCBC=m 290CONFIG_CRYPTO_PCBC=m
247CONFIG_CRYPTO_HMAC=y 291CONFIG_CRYPTO_HMAC=y
292CONFIG_CRYPTO_MD4=m
248CONFIG_CRYPTO_MICHAEL_MIC=m 293CONFIG_CRYPTO_MICHAEL_MIC=m
249CONFIG_CRYPTO_SHA256=m 294CONFIG_CRYPTO_SHA256=m
250CONFIG_CRYPTO_SHA512=m 295CONFIG_CRYPTO_SHA512=m
251CONFIG_CRYPTO_WP512=m 296CONFIG_CRYPTO_WP512=m
252CONFIG_CRYPTO_AES=m 297CONFIG_CRYPTO_AES=m
253CONFIG_CRYPTO_ANUBIS=m 298CONFIG_CRYPTO_ANUBIS=m
299CONFIG_CRYPTO_ARC4=m
254CONFIG_CRYPTO_BLOWFISH=m 300CONFIG_CRYPTO_BLOWFISH=m
255CONFIG_CRYPTO_CAST5=m 301CONFIG_CRYPTO_CAST5=m
256CONFIG_CRYPTO_CAST6=m 302CONFIG_CRYPTO_CAST6=m
@@ -260,6 +306,3 @@ CONFIG_CRYPTO_TEA=m
260CONFIG_CRYPTO_TWOFISH=m 306CONFIG_CRYPTO_TWOFISH=m
261# CONFIG_CRYPTO_ANSI_CPRNG is not set 307# CONFIG_CRYPTO_ANSI_CPRNG is not set
262# CONFIG_CRYPTO_HW is not set 308# CONFIG_CRYPTO_HW is not set
263# CONFIG_VIRTUALIZATION is not set
264CONFIG_CRC_T10DIF=y
265CONFIG_LIBCRC32C=m
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 5aac9a8bc53b..9352e4430c3b 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -2,12 +2,12 @@ CONFIG_EXPERIMENTAL=y
2CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
3CONFIG_LOG_BUF_SHIFT=14 3CONFIG_LOG_BUF_SHIFT=14
4CONFIG_BLK_DEV_INITRD=y 4CONFIG_BLK_DEV_INITRD=y
5# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
6CONFIG_EXPERT=y 5CONFIG_EXPERT=y
7CONFIG_SLAB=y 6CONFIG_SLAB=y
8CONFIG_MODULES=y 7CONFIG_MODULES=y
9CONFIG_MODULE_UNLOAD=y 8CONFIG_MODULE_UNLOAD=y
10# CONFIG_BLK_DEV_BSG is not set 9# CONFIG_BLK_DEV_BSG is not set
10CONFIG_PARTITION_ADVANCED=y
11# CONFIG_PPC_CHRP is not set 11# CONFIG_PPC_CHRP is not set
12# CONFIG_PPC_PMAC is not set 12# CONFIG_PPC_PMAC is not set
13CONFIG_PPC_83xx=y 13CONFIG_PPC_83xx=y
@@ -25,7 +25,6 @@ CONFIG_ASP834x=y
25CONFIG_QUICC_ENGINE=y 25CONFIG_QUICC_ENGINE=y
26CONFIG_QE_GPIO=y 26CONFIG_QE_GPIO=y
27CONFIG_MATH_EMULATION=y 27CONFIG_MATH_EMULATION=y
28CONFIG_SPARSE_IRQ=y
29CONFIG_PCI=y 28CONFIG_PCI=y
30CONFIG_NET=y 29CONFIG_NET=y
31CONFIG_PACKET=y 30CONFIG_PACKET=y
@@ -42,10 +41,9 @@ CONFIG_INET_ESP=y
42# CONFIG_INET_LRO is not set 41# CONFIG_INET_LRO is not set
43# CONFIG_IPV6 is not set 42# CONFIG_IPV6 is not set
44CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 43CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
44CONFIG_DEVTMPFS=y
45# CONFIG_FW_LOADER is not set 45# CONFIG_FW_LOADER is not set
46CONFIG_MTD=y 46CONFIG_MTD=y
47CONFIG_MTD_PARTITIONS=y
48CONFIG_MTD_OF_PARTS=y
49CONFIG_MTD_CHAR=y 47CONFIG_MTD_CHAR=y
50CONFIG_MTD_BLOCK=y 48CONFIG_MTD_BLOCK=y
51CONFIG_MTD_CFI=y 49CONFIG_MTD_CFI=y
@@ -64,15 +62,14 @@ CONFIG_ATA=y
64CONFIG_SATA_FSL=y 62CONFIG_SATA_FSL=y
65CONFIG_SATA_SIL=y 63CONFIG_SATA_SIL=y
66CONFIG_NETDEVICES=y 64CONFIG_NETDEVICES=y
65CONFIG_MII=y
66CONFIG_UCC_GETH=y
67CONFIG_GIANFAR=y
67CONFIG_MARVELL_PHY=y 68CONFIG_MARVELL_PHY=y
68CONFIG_DAVICOM_PHY=y 69CONFIG_DAVICOM_PHY=y
69CONFIG_VITESSE_PHY=y 70CONFIG_VITESSE_PHY=y
70CONFIG_ICPLUS_PHY=y 71CONFIG_ICPLUS_PHY=y
71CONFIG_FIXED_PHY=y 72CONFIG_FIXED_PHY=y
72CONFIG_NET_ETHERNET=y
73CONFIG_MII=y
74CONFIG_GIANFAR=y
75CONFIG_UCC_GETH=y
76CONFIG_INPUT_FF_MEMLESS=m 73CONFIG_INPUT_FF_MEMLESS=m
77# CONFIG_INPUT_MOUSEDEV is not set 74# CONFIG_INPUT_MOUSEDEV is not set
78# CONFIG_INPUT_KEYBOARD is not set 75# CONFIG_INPUT_KEYBOARD is not set
@@ -112,17 +109,12 @@ CONFIG_RTC_DRV_DS1374=y
112CONFIG_EXT2_FS=y 109CONFIG_EXT2_FS=y
113CONFIG_EXT3_FS=y 110CONFIG_EXT3_FS=y
114# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 111# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
115CONFIG_INOTIFY=y
116CONFIG_PROC_KCORE=y 112CONFIG_PROC_KCORE=y
117CONFIG_TMPFS=y 113CONFIG_TMPFS=y
118CONFIG_NFS_FS=y 114CONFIG_NFS_FS=y
119CONFIG_NFS_V3=y
120CONFIG_NFS_V4=y 115CONFIG_NFS_V4=y
121CONFIG_ROOT_NFS=y 116CONFIG_ROOT_NFS=y
122CONFIG_PARTITION_ADVANCED=y
123CONFIG_CRC_T10DIF=y 117CONFIG_CRC_T10DIF=y
124# CONFIG_RCU_CPU_STALL_DETECTOR is not set
125CONFIG_SYSCTL_SYSCALL_CHECK=y
126CONFIG_CRYPTO_ECB=m 118CONFIG_CRYPTO_ECB=m
127CONFIG_CRYPTO_PCBC=m 119CONFIG_CRYPTO_PCBC=m
128CONFIG_CRYPTO_SHA256=y 120CONFIG_CRYPTO_SHA256=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 03ee911c4577..8b5bda27d248 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -5,7 +5,9 @@ CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y 5CONFIG_POSIX_MQUEUE=y
6CONFIG_BSD_PROCESS_ACCT=y 6CONFIG_BSD_PROCESS_ACCT=y
7CONFIG_AUDIT=y 7CONFIG_AUDIT=y
8CONFIG_SPARSE_IRQ=y 8CONFIG_IRQ_DOMAIN_DEBUG=y
9CONFIG_NO_HZ=y
10CONFIG_HIGH_RES_TIMERS=y
9CONFIG_IKCONFIG=y 11CONFIG_IKCONFIG=y
10CONFIG_IKCONFIG_PROC=y 12CONFIG_IKCONFIG_PROC=y
11CONFIG_LOG_BUF_SHIFT=14 13CONFIG_LOG_BUF_SHIFT=14
@@ -17,6 +19,8 @@ CONFIG_MODULE_UNLOAD=y
17CONFIG_MODULE_FORCE_UNLOAD=y 19CONFIG_MODULE_FORCE_UNLOAD=y
18CONFIG_MODVERSIONS=y 20CONFIG_MODVERSIONS=y
19# CONFIG_BLK_DEV_BSG is not set 21# CONFIG_BLK_DEV_BSG is not set
22CONFIG_PARTITION_ADVANCED=y
23CONFIG_MAC_PARTITION=y
20CONFIG_MPC8540_ADS=y 24CONFIG_MPC8540_ADS=y
21CONFIG_MPC8560_ADS=y 25CONFIG_MPC8560_ADS=y
22CONFIG_MPC85xx_CDS=y 26CONFIG_MPC85xx_CDS=y
@@ -40,8 +44,6 @@ CONFIG_SBC8548=y
40CONFIG_QUICC_ENGINE=y 44CONFIG_QUICC_ENGINE=y
41CONFIG_QE_GPIO=y 45CONFIG_QE_GPIO=y
42CONFIG_HIGHMEM=y 46CONFIG_HIGHMEM=y
43CONFIG_NO_HZ=y
44CONFIG_HIGH_RES_TIMERS=y
45CONFIG_BINFMT_MISC=m 47CONFIG_BINFMT_MISC=m
46CONFIG_MATH_EMULATION=y 48CONFIG_MATH_EMULATION=y
47CONFIG_FORCE_MAX_ZONEORDER=12 49CONFIG_FORCE_MAX_ZONEORDER=12
@@ -74,36 +76,25 @@ CONFIG_INET_ESP=y
74CONFIG_IPV6=y 76CONFIG_IPV6=y
75CONFIG_IP_SCTP=m 77CONFIG_IP_SCTP=m
76CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 78CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
79CONFIG_DEVTMPFS=y
77CONFIG_MTD=y 80CONFIG_MTD=y
78CONFIG_MTD_CMDLINE_PARTS=y 81CONFIG_MTD_CMDLINE_PARTS=y
79CONFIG_MTD_CHAR=y 82CONFIG_MTD_CHAR=y
80CONFIG_MTD_BLOCK=y 83CONFIG_MTD_BLOCK=y
81CONFIG_MTD_CFI=y
82CONFIG_FTL=y 84CONFIG_FTL=y
83CONFIG_MTD_GEN_PROBE=y 85CONFIG_MTD_CFI=y
84CONFIG_MTD_MAP_BANK_WIDTH_1=y
85CONFIG_MTD_MAP_BANK_WIDTH_2=y
86CONFIG_MTD_MAP_BANK_WIDTH_4=y
87CONFIG_MTD_CFI_I1=y
88CONFIG_MTD_CFI_I2=y
89CONFIG_MTD_CFI_INTELEXT=y 86CONFIG_MTD_CFI_INTELEXT=y
90CONFIG_MTD_CFI_AMDSTD=y 87CONFIG_MTD_CFI_AMDSTD=y
91CONFIG_MTD_CFI_UTIL=y
92CONFIG_MTD_PHYSMAP_OF=y 88CONFIG_MTD_PHYSMAP_OF=y
93CONFIG_MTD_PARTITIONS=y 89CONFIG_MTD_M25P80=y
94CONFIG_MTD_OF_PARTS=y
95CONFIG_MTD_NAND=y 90CONFIG_MTD_NAND=y
96CONFIG_MTD_NAND_FSL_ELBC=y 91CONFIG_MTD_NAND_FSL_ELBC=y
97CONFIG_MTD_NAND_FSL_IFC=y 92CONFIG_MTD_NAND_FSL_IFC=y
98CONFIG_MTD_NAND_IDS=y
99CONFIG_MTD_NAND_ECC=y
100CONFIG_MTD_M25P80=y
101CONFIG_PROC_DEVICETREE=y 93CONFIG_PROC_DEVICETREE=y
102CONFIG_BLK_DEV_LOOP=y 94CONFIG_BLK_DEV_LOOP=y
103CONFIG_BLK_DEV_NBD=y 95CONFIG_BLK_DEV_NBD=y
104CONFIG_BLK_DEV_RAM=y 96CONFIG_BLK_DEV_RAM=y
105CONFIG_BLK_DEV_RAM_SIZE=131072 97CONFIG_BLK_DEV_RAM_SIZE=131072
106CONFIG_MISC_DEVICES=y
107CONFIG_EEPROM_LEGACY=y 98CONFIG_EEPROM_LEGACY=y
108CONFIG_BLK_DEV_SD=y 99CONFIG_BLK_DEV_SD=y
109CONFIG_CHR_DEV_ST=y 100CONFIG_CHR_DEV_ST=y
@@ -115,6 +106,7 @@ CONFIG_ATA=y
115CONFIG_SATA_AHCI=y 106CONFIG_SATA_AHCI=y
116CONFIG_SATA_FSL=y 107CONFIG_SATA_FSL=y
117CONFIG_PATA_ALI=y 108CONFIG_PATA_ALI=y
109CONFIG_PATA_VIA=y
118CONFIG_NETDEVICES=y 110CONFIG_NETDEVICES=y
119CONFIG_DUMMY=y 111CONFIG_DUMMY=y
120CONFIG_FS_ENET=y 112CONFIG_FS_ENET=y
@@ -134,7 +126,6 @@ CONFIG_SERIAL_8250=y
134CONFIG_SERIAL_8250_CONSOLE=y 126CONFIG_SERIAL_8250_CONSOLE=y
135CONFIG_SERIAL_8250_NR_UARTS=2 127CONFIG_SERIAL_8250_NR_UARTS=2
136CONFIG_SERIAL_8250_RUNTIME_UARTS=2 128CONFIG_SERIAL_8250_RUNTIME_UARTS=2
137CONFIG_SERIAL_8250_EXTENDED=y
138CONFIG_SERIAL_8250_MANY_PORTS=y 129CONFIG_SERIAL_8250_MANY_PORTS=y
139CONFIG_SERIAL_8250_DETECT_IRQ=y 130CONFIG_SERIAL_8250_DETECT_IRQ=y
140CONFIG_SERIAL_8250_RSA=y 131CONFIG_SERIAL_8250_RSA=y
@@ -183,7 +174,6 @@ CONFIG_HID_SAMSUNG=y
183CONFIG_HID_SONY=y 174CONFIG_HID_SONY=y
184CONFIG_HID_SUNPLUS=y 175CONFIG_HID_SUNPLUS=y
185CONFIG_USB=y 176CONFIG_USB=y
186CONFIG_USB_DEVICEFS=y
187CONFIG_USB_MON=y 177CONFIG_USB_MON=y
188CONFIG_USB_EHCI_HCD=y 178CONFIG_USB_EHCI_HCD=y
189CONFIG_USB_EHCI_FSL=y 179CONFIG_USB_EHCI_FSL=y
@@ -229,18 +219,13 @@ CONFIG_QNX4FS_FS=m
229CONFIG_SYSV_FS=m 219CONFIG_SYSV_FS=m
230CONFIG_UFS_FS=m 220CONFIG_UFS_FS=m
231CONFIG_NFS_FS=y 221CONFIG_NFS_FS=y
232CONFIG_NFS_V3=y
233CONFIG_NFS_V4=y 222CONFIG_NFS_V4=y
234CONFIG_ROOT_NFS=y 223CONFIG_ROOT_NFS=y
235CONFIG_NFSD=y 224CONFIG_NFSD=y
236CONFIG_PARTITION_ADVANCED=y
237CONFIG_MAC_PARTITION=y
238CONFIG_CRC_T10DIF=y 225CONFIG_CRC_T10DIF=y
239CONFIG_DEBUG_FS=y 226CONFIG_DEBUG_FS=y
240CONFIG_DETECT_HUNG_TASK=y 227CONFIG_DETECT_HUNG_TASK=y
241CONFIG_DEBUG_INFO=y 228CONFIG_DEBUG_INFO=y
242CONFIG_SYSCTL_SYSCALL_CHECK=y
243CONFIG_IRQ_DOMAIN_DEBUG=y
244CONFIG_CRYPTO_PCBC=m 229CONFIG_CRYPTO_PCBC=m
245CONFIG_CRYPTO_SHA256=y 230CONFIG_CRYPTO_SHA256=y
246CONFIG_CRYPTO_SHA512=y 231CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index fdfa84dc908f..b0974e7e98ae 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -7,7 +7,9 @@ CONFIG_SYSVIPC=y
7CONFIG_POSIX_MQUEUE=y 7CONFIG_POSIX_MQUEUE=y
8CONFIG_BSD_PROCESS_ACCT=y 8CONFIG_BSD_PROCESS_ACCT=y
9CONFIG_AUDIT=y 9CONFIG_AUDIT=y
10CONFIG_SPARSE_IRQ=y 10CONFIG_IRQ_DOMAIN_DEBUG=y
11CONFIG_NO_HZ=y
12CONFIG_HIGH_RES_TIMERS=y
11CONFIG_IKCONFIG=y 13CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y 14CONFIG_IKCONFIG_PROC=y
13CONFIG_LOG_BUF_SHIFT=14 15CONFIG_LOG_BUF_SHIFT=14
@@ -19,6 +21,8 @@ CONFIG_MODULE_UNLOAD=y
19CONFIG_MODULE_FORCE_UNLOAD=y 21CONFIG_MODULE_FORCE_UNLOAD=y
20CONFIG_MODVERSIONS=y 22CONFIG_MODVERSIONS=y
21# CONFIG_BLK_DEV_BSG is not set 23# CONFIG_BLK_DEV_BSG is not set
24CONFIG_PARTITION_ADVANCED=y
25CONFIG_MAC_PARTITION=y
22CONFIG_MPC8540_ADS=y 26CONFIG_MPC8540_ADS=y
23CONFIG_MPC8560_ADS=y 27CONFIG_MPC8560_ADS=y
24CONFIG_MPC85xx_CDS=y 28CONFIG_MPC85xx_CDS=y
@@ -42,8 +46,6 @@ CONFIG_SBC8548=y
42CONFIG_QUICC_ENGINE=y 46CONFIG_QUICC_ENGINE=y
43CONFIG_QE_GPIO=y 47CONFIG_QE_GPIO=y
44CONFIG_HIGHMEM=y 48CONFIG_HIGHMEM=y
45CONFIG_NO_HZ=y
46CONFIG_HIGH_RES_TIMERS=y
47CONFIG_BINFMT_MISC=m 49CONFIG_BINFMT_MISC=m
48CONFIG_MATH_EMULATION=y 50CONFIG_MATH_EMULATION=y
49CONFIG_IRQ_ALL_CPUS=y 51CONFIG_IRQ_ALL_CPUS=y
@@ -77,36 +79,25 @@ CONFIG_INET_ESP=y
77CONFIG_IPV6=y 79CONFIG_IPV6=y
78CONFIG_IP_SCTP=m 80CONFIG_IP_SCTP=m
79CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 81CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
82CONFIG_DEVTMPFS=y
80CONFIG_MTD=y 83CONFIG_MTD=y
81CONFIG_MTD_CMDLINE_PARTS=y 84CONFIG_MTD_CMDLINE_PARTS=y
82CONFIG_MTD_CHAR=y 85CONFIG_MTD_CHAR=y
83CONFIG_MTD_BLOCK=y 86CONFIG_MTD_BLOCK=y
84CONFIG_MTD_CFI=y
85CONFIG_FTL=y 87CONFIG_FTL=y
86CONFIG_MTD_GEN_PROBE=y 88CONFIG_MTD_CFI=y
87CONFIG_MTD_MAP_BANK_WIDTH_1=y
88CONFIG_MTD_MAP_BANK_WIDTH_2=y
89CONFIG_MTD_MAP_BANK_WIDTH_4=y
90CONFIG_MTD_CFI_I1=y
91CONFIG_MTD_CFI_I2=y
92CONFIG_MTD_CFI_INTELEXT=y 89CONFIG_MTD_CFI_INTELEXT=y
93CONFIG_MTD_CFI_AMDSTD=y 90CONFIG_MTD_CFI_AMDSTD=y
94CONFIG_MTD_CFI_UTIL=y
95CONFIG_MTD_PHYSMAP_OF=y 91CONFIG_MTD_PHYSMAP_OF=y
96CONFIG_MTD_PARTITIONS=y 92CONFIG_MTD_M25P80=y
97CONFIG_MTD_OF_PARTS=y
98CONFIG_MTD_NAND=y 93CONFIG_MTD_NAND=y
99CONFIG_MTD_NAND_FSL_ELBC=y 94CONFIG_MTD_NAND_FSL_ELBC=y
100CONFIG_MTD_NAND_FSL_IFC=y 95CONFIG_MTD_NAND_FSL_IFC=y
101CONFIG_MTD_NAND_IDS=y
102CONFIG_MTD_NAND_ECC=y
103CONFIG_MTD_M25P80=y
104CONFIG_PROC_DEVICETREE=y 96CONFIG_PROC_DEVICETREE=y
105CONFIG_BLK_DEV_LOOP=y 97CONFIG_BLK_DEV_LOOP=y
106CONFIG_BLK_DEV_NBD=y 98CONFIG_BLK_DEV_NBD=y
107CONFIG_BLK_DEV_RAM=y 99CONFIG_BLK_DEV_RAM=y
108CONFIG_BLK_DEV_RAM_SIZE=131072 100CONFIG_BLK_DEV_RAM_SIZE=131072
109CONFIG_MISC_DEVICES=y
110CONFIG_EEPROM_LEGACY=y 101CONFIG_EEPROM_LEGACY=y
111CONFIG_BLK_DEV_SD=y 102CONFIG_BLK_DEV_SD=y
112CONFIG_CHR_DEV_ST=y 103CONFIG_CHR_DEV_ST=y
@@ -137,7 +128,6 @@ CONFIG_SERIAL_8250=y
137CONFIG_SERIAL_8250_CONSOLE=y 128CONFIG_SERIAL_8250_CONSOLE=y
138CONFIG_SERIAL_8250_NR_UARTS=2 129CONFIG_SERIAL_8250_NR_UARTS=2
139CONFIG_SERIAL_8250_RUNTIME_UARTS=2 130CONFIG_SERIAL_8250_RUNTIME_UARTS=2
140CONFIG_SERIAL_8250_EXTENDED=y
141CONFIG_SERIAL_8250_MANY_PORTS=y 131CONFIG_SERIAL_8250_MANY_PORTS=y
142CONFIG_SERIAL_8250_DETECT_IRQ=y 132CONFIG_SERIAL_8250_DETECT_IRQ=y
143CONFIG_SERIAL_8250_RSA=y 133CONFIG_SERIAL_8250_RSA=y
@@ -186,7 +176,6 @@ CONFIG_HID_SAMSUNG=y
186CONFIG_HID_SONY=y 176CONFIG_HID_SONY=y
187CONFIG_HID_SUNPLUS=y 177CONFIG_HID_SUNPLUS=y
188CONFIG_USB=y 178CONFIG_USB=y
189CONFIG_USB_DEVICEFS=y
190CONFIG_USB_MON=y 179CONFIG_USB_MON=y
191CONFIG_USB_EHCI_HCD=y 180CONFIG_USB_EHCI_HCD=y
192CONFIG_USB_EHCI_FSL=y 181CONFIG_USB_EHCI_FSL=y
@@ -232,18 +221,13 @@ CONFIG_QNX4FS_FS=m
232CONFIG_SYSV_FS=m 221CONFIG_SYSV_FS=m
233CONFIG_UFS_FS=m 222CONFIG_UFS_FS=m
234CONFIG_NFS_FS=y 223CONFIG_NFS_FS=y
235CONFIG_NFS_V3=y
236CONFIG_NFS_V4=y 224CONFIG_NFS_V4=y
237CONFIG_ROOT_NFS=y 225CONFIG_ROOT_NFS=y
238CONFIG_NFSD=y 226CONFIG_NFSD=y
239CONFIG_PARTITION_ADVANCED=y
240CONFIG_MAC_PARTITION=y
241CONFIG_CRC_T10DIF=y 227CONFIG_CRC_T10DIF=y
242CONFIG_DEBUG_FS=y 228CONFIG_DEBUG_FS=y
243CONFIG_DETECT_HUNG_TASK=y 229CONFIG_DETECT_HUNG_TASK=y
244CONFIG_DEBUG_INFO=y 230CONFIG_DEBUG_INFO=y
245CONFIG_SYSCTL_SYSCALL_CHECK=y
246CONFIG_IRQ_DOMAIN_DEBUG=y
247CONFIG_CRYPTO_PCBC=m 231CONFIG_CRYPTO_PCBC=m
248CONFIG_CRYPTO_SHA256=y 232CONFIG_CRYPTO_SHA256=y
249CONFIG_CRYPTO_SHA512=y 233CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 50d82c8a037f..b3c083de17ad 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -553,9 +553,7 @@ static inline int cpu_has_feature(unsigned long feature)
553 & feature); 553 & feature);
554} 554}
555 555
556#ifdef CONFIG_HAVE_HW_BREAKPOINT
557#define HBP_NUM 1 556#define HBP_NUM 1
558#endif /* CONFIG_HAVE_HW_BREAKPOINT */
559 557
560#endif /* !__ASSEMBLY__ */ 558#endif /* !__ASSEMBLY__ */
561 559
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 50ea12fd7bf5..a8bf5c673a3c 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -33,6 +33,7 @@
33#include <asm/kvm_asm.h> 33#include <asm/kvm_asm.h>
34#include <asm/processor.h> 34#include <asm/processor.h>
35#include <asm/page.h> 35#include <asm/page.h>
36#include <asm/cacheflush.h>
36 37
37#define KVM_MAX_VCPUS NR_CPUS 38#define KVM_MAX_VCPUS NR_CPUS
38#define KVM_MAX_VCORES NR_CPUS 39#define KVM_MAX_VCORES NR_CPUS
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 0124937a23b9..e006f0bdea95 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -219,4 +219,16 @@ void kvmppc_claim_lpid(long lpid);
219void kvmppc_free_lpid(long lpid); 219void kvmppc_free_lpid(long lpid);
220void kvmppc_init_lpid(unsigned long nr_lpids); 220void kvmppc_init_lpid(unsigned long nr_lpids);
221 221
222static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
223{
224 /* Clear i-cache for new pages */
225 struct page *page;
226 page = pfn_to_page(pfn);
227 if (!test_bit(PG_arch_1, &page->flags)) {
228 flush_dcache_icache_page(page);
229 set_bit(PG_arch_1, &page->flags);
230 }
231}
232
233
222#endif /* __POWERPC_KVM_PPC_H__ */ 234#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
index 326d33ca55cd..d4f471fb1031 100644
--- a/arch/powerpc/include/asm/mpic_msgr.h
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <asm/smp.h> 16#include <asm/smp.h>
17#include <asm/io.h>
17 18
18struct mpic_msgr { 19struct mpic_msgr {
19 u32 __iomem *base; 20 u32 __iomem *base;
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 2d7bb8ced136..e4897523de41 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -83,11 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
83 return 0; 83 return 0;
84 } 84 }
85 85
86 if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) { 86 if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) {
87 dev_info(dev, "Warning: IOMMU window too big for device mask\n"); 87 dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
88 dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n", 88 dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
89 mask, (tbl->it_offset + tbl->it_size) << 89 mask, tbl->it_offset << IOMMU_PAGE_SHIFT);
90 IOMMU_PAGE_SHIFT);
91 return 0; 90 return 0;
92 } else 91 } else
93 return 1; 92 return 1;
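The dma_iommu_dma_supported() change relaxes the mask test: instead of rejecting any device whose DMA mask cannot cover the end of the IOMMU table (it_offset + it_size), it now only rejects devices whose mask cannot even reach the start of the table, presumably because allocations can still be satisfied from the low part of the window. A hedged numeric sketch of the two predicates; the page shift and table geometry below are made up for illustration, not taken from real hardware.

/* Old vs. new dma_supported predicate, with illustrative geometry. */
#include <stdbool.h>
#include <stdio.h>

#define IOMMU_PAGE_SHIFT 12            /* assume 4 KiB IOMMU pages */

struct iommu_table { unsigned long it_offset, it_size; };  /* in pages */

static bool old_check(const struct iommu_table *tbl, unsigned long long mask)
{
    /* supported only if the mask covers the whole window */
    return (tbl->it_offset + tbl->it_size) <= (mask >> IOMMU_PAGE_SHIFT);
}

static bool new_check(const struct iommu_table *tbl, unsigned long long mask)
{
    /* rejected only if even the window start is unreachable */
    return tbl->it_offset <= (mask >> IOMMU_PAGE_SHIFT);
}

int main(void)
{
    /* window starting at 0 and spanning 8 GiB worth of 4 KiB pages */
    struct iommu_table tbl = { .it_offset = 0, .it_size = 8UL << (30 - 12) };
    unsigned long long mask32 = 0xffffffffULL;   /* 32-bit capable device */

    /* old: 0 (rejected), new: 1 (still usable below 4 GiB) */
    printf("old: %d  new: %d\n", old_check(&tbl, mask32),
           new_check(&tbl, mask32));
    return 0;
}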
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index f3a82dde61db..956a4c496de9 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -253,7 +253,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
253 253
254 /* Do not emulate user-space instructions, instead single-step them */ 254 /* Do not emulate user-space instructions, instead single-step them */
255 if (user_mode(regs)) { 255 if (user_mode(regs)) {
256 bp->ctx->task->thread.last_hit_ubp = bp; 256 current->thread.last_hit_ubp = bp;
257 regs->msr |= MSR_SE; 257 regs->msr |= MSR_SE;
258 goto out; 258 goto out;
259 } 259 }
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 782bd0a3c2f0..c470a40b29f5 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -25,6 +25,7 @@
25#include <asm/processor.h> 25#include <asm/processor.h>
26#include <asm/machdep.h> 26#include <asm/machdep.h>
27#include <asm/debug.h> 27#include <asm/debug.h>
28#include <linux/slab.h>
28 29
29/* 30/*
30 * This table contains the mapping between PowerPC hardware trap types, and 31 * This table contains the mapping between PowerPC hardware trap types, and
@@ -101,6 +102,21 @@ static int computeSignal(unsigned int tt)
101 return SIGHUP; /* default for things we don't know about */ 102 return SIGHUP; /* default for things we don't know about */
102} 103}
103 104
105/**
106 *
107 * kgdb_skipexception - Bail out of KGDB when we've been triggered.
108 * @exception: Exception vector number
109 * @regs: Current &struct pt_regs.
110 *
111 * On some architectures we need to skip a breakpoint exception when
112 * it occurs after a breakpoint has been removed.
113 *
114 */
115int kgdb_skipexception(int exception, struct pt_regs *regs)
116{
117 return kgdb_isremovedbreak(regs->nip);
118}
119
104static int kgdb_call_nmi_hook(struct pt_regs *regs) 120static int kgdb_call_nmi_hook(struct pt_regs *regs)
105{ 121{
106 kgdb_nmicallback(raw_smp_processor_id(), regs); 122 kgdb_nmicallback(raw_smp_processor_id(), regs);
@@ -138,6 +154,8 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
138static int kgdb_singlestep(struct pt_regs *regs) 154static int kgdb_singlestep(struct pt_regs *regs)
139{ 155{
140 struct thread_info *thread_info, *exception_thread_info; 156 struct thread_info *thread_info, *exception_thread_info;
157 struct thread_info *backup_current_thread_info = \
158 (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
141 159
142 if (user_mode(regs)) 160 if (user_mode(regs))
143 return 0; 161 return 0;
@@ -155,13 +173,17 @@ static int kgdb_singlestep(struct pt_regs *regs)
155 thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1)); 173 thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
156 exception_thread_info = current_thread_info(); 174 exception_thread_info = current_thread_info();
157 175
158 if (thread_info != exception_thread_info) 176 if (thread_info != exception_thread_info) {
177 /* Save the original current_thread_info. */
178 memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info);
159 memcpy(exception_thread_info, thread_info, sizeof *thread_info); 179 memcpy(exception_thread_info, thread_info, sizeof *thread_info);
180 }
160 181
161 kgdb_handle_exception(0, SIGTRAP, 0, regs); 182 kgdb_handle_exception(0, SIGTRAP, 0, regs);
162 183
163 if (thread_info != exception_thread_info) 184 if (thread_info != exception_thread_info)
164 memcpy(thread_info, exception_thread_info, sizeof *thread_info); 185 /* Restore current_thread_info lastly. */
186 memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);
165 187
166 return 1; 188 return 1;
167} 189}
@@ -410,7 +432,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
410#else 432#else
411 linux_regs->msr |= MSR_SE; 433 linux_regs->msr |= MSR_SE;
412#endif 434#endif
413 kgdb_single_step = 1;
414 atomic_set(&kgdb_cpu_doing_single_step, 435 atomic_set(&kgdb_cpu_doing_single_step,
415 raw_smp_processor_id()); 436 raw_smp_processor_id());
416 } 437 }
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index f2496f2faecc..4e3cc47f26b9 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
107 long ret; 107 long ret;
108 108
109 if (personality(current->personality) == PER_LINUX32 109 if (personality(current->personality) == PER_LINUX32
110 && personality == PER_LINUX) 110 && personality(personality) == PER_LINUX)
111 personality = PER_LINUX32; 111 personality = (personality & ~PER_MASK) | PER_LINUX32;
112 ret = sys_personality(personality); 112 ret = sys_personality(personality);
113 if (ret == PER_LINUX32) 113 if (personality(ret) == PER_LINUX32)
114 ret = PER_LINUX; 114 ret = (ret & ~PER_MASK) | PER_LINUX;
115 return ret; 115 return ret;
116} 116}
117#endif 117#endif
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index f922c29bb234..837f13e7b6bf 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -211,6 +211,9 @@ next_pteg:
211 pteg1 |= PP_RWRX; 211 pteg1 |= PP_RWRX;
212 } 212 }
213 213
214 if (orig_pte->may_execute)
215 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
216
214 local_irq_disable(); 217 local_irq_disable();
215 218
216 if (pteg[rr]) { 219 if (pteg[rr]) {
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 10fc8ec9d2a8..0688b6b39585 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -126,6 +126,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
126 126
127 if (!orig_pte->may_execute) 127 if (!orig_pte->may_execute)
128 rflags |= HPTE_R_N; 128 rflags |= HPTE_R_N;
129 else
130 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
129 131
130 hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M); 132 hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);
131 133
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5a84c8d3d040..44b72feaff7d 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1421,13 +1421,13 @@ _GLOBAL(kvmppc_h_cede)
1421 sync /* order setting ceded vs. testing prodded */ 1421 sync /* order setting ceded vs. testing prodded */
1422 lbz r5,VCPU_PRODDED(r3) 1422 lbz r5,VCPU_PRODDED(r3)
1423 cmpwi r5,0 1423 cmpwi r5,0
1424 bne 1f 1424 bne kvm_cede_prodded
1425 li r0,0 /* set trap to 0 to say hcall is handled */ 1425 li r0,0 /* set trap to 0 to say hcall is handled */
1426 stw r0,VCPU_TRAP(r3) 1426 stw r0,VCPU_TRAP(r3)
1427 li r0,H_SUCCESS 1427 li r0,H_SUCCESS
1428 std r0,VCPU_GPR(R3)(r3) 1428 std r0,VCPU_GPR(R3)(r3)
1429BEGIN_FTR_SECTION 1429BEGIN_FTR_SECTION
1430 b 2f /* just send it up to host on 970 */ 1430 b kvm_cede_exit /* just send it up to host on 970 */
1431END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) 1431END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1432 1432
1433 /* 1433 /*
@@ -1446,7 +1446,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1446 or r4,r4,r0 1446 or r4,r4,r0
1447 PPC_POPCNTW(R7,R4) 1447 PPC_POPCNTW(R7,R4)
1448 cmpw r7,r8 1448 cmpw r7,r8
1449 bge 2f 1449 bge kvm_cede_exit
1450 stwcx. r4,0,r6 1450 stwcx. r4,0,r6
1451 bne 31b 1451 bne 31b
1452 li r0,1 1452 li r0,1
@@ -1555,7 +1555,8 @@ kvm_end_cede:
1555 b hcall_real_fallback 1555 b hcall_real_fallback
1556 1556
1557 /* cede when already previously prodded case */ 1557 /* cede when already previously prodded case */
15581: li r0,0 1558kvm_cede_prodded:
1559 li r0,0
1559 stb r0,VCPU_PRODDED(r3) 1560 stb r0,VCPU_PRODDED(r3)
1560 sync /* order testing prodded vs. clearing ceded */ 1561 sync /* order testing prodded vs. clearing ceded */
1561 stb r0,VCPU_CEDED(r3) 1562 stb r0,VCPU_CEDED(r3)
@@ -1563,7 +1564,8 @@ kvm_end_cede:
1563 blr 1564 blr
1564 1565
1565 /* we've ceded but we want to give control to the host */ 1566 /* we've ceded but we want to give control to the host */
15662: li r3,H_TOO_HARD 1567kvm_cede_exit:
1568 li r3,H_TOO_HARD
1567 blr 1569 blr
1568 1570
1569secondary_too_late: 1571secondary_too_late:
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index c510fc961302..a2b66717813d 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -322,11 +322,11 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
322static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) 322static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
323{ 323{
324 if (vcpu_e500->g2h_tlb1_map) 324 if (vcpu_e500->g2h_tlb1_map)
325 memset(vcpu_e500->g2h_tlb1_map, 325 memset(vcpu_e500->g2h_tlb1_map, 0,
326 sizeof(u64) * vcpu_e500->gtlb_params[1].entries, 0); 326 sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
327 if (vcpu_e500->h2g_tlb1_rmap) 327 if (vcpu_e500->h2g_tlb1_rmap)
328 memset(vcpu_e500->h2g_tlb1_rmap, 328 memset(vcpu_e500->h2g_tlb1_rmap, 0,
329 sizeof(unsigned int) * host_tlb_params[1].entries, 0); 329 sizeof(unsigned int) * host_tlb_params[1].entries);
330} 330}
331 331
332static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) 332static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -539,6 +539,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
539 539
540 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, 540 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
541 ref, gvaddr, stlbe); 541 ref, gvaddr, stlbe);
542
543 /* Clear i-cache for new pages */
544 kvmppc_mmu_flush_icache(pfn);
542} 545}
543 546
544/* XXX only map the one-one case, for now use TLB0 */ 547/* XXX only map the one-one case, for now use TLB0 */
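The first e500_tlb.c hunk above fixes swapped memset() arguments in clear_tlb1_bitmap(): memset takes (destination, fill byte, length), so memset(map, size, 0) asks for zero bytes to be written and the bitmaps were never actually cleared. A trivial demonstration of the difference; the array name is illustrative.

/* memset(dst, value, count): the second argument is the fill byte,
 * the third is the byte count -- swapping them writes nothing at all. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char map[8];

    memset(map, 0xff, sizeof(map));     /* dirty the buffer              */
    memset(map, sizeof(map), 0);        /* buggy order: clears 0 bytes   */
    printf("after swapped args: %02x\n", map[0]);   /* still ff */

    memset(map, 0, sizeof(map));        /* correct: zero all 8 bytes     */
    printf("after correct call: %02x\n", map[0]);   /* 00 */
    return 0;
}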
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index f9ede7c6606e..0d24ff15f5f6 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -288,7 +288,7 @@ err1; stb r0,0(r3)
288 std r0,16(r1) 288 std r0,16(r1)
289 stdu r1,-STACKFRAMESIZE(r1) 289 stdu r1,-STACKFRAMESIZE(r1)
290 bl .enter_vmx_usercopy 290 bl .enter_vmx_usercopy
291 cmpwi r3,0 291 cmpwi cr1,r3,0
292 ld r0,STACKFRAMESIZE+16(r1) 292 ld r0,STACKFRAMESIZE+16(r1)
293 ld r3,STACKFRAMESIZE+48(r1) 293 ld r3,STACKFRAMESIZE+48(r1)
294 ld r4,STACKFRAMESIZE+56(r1) 294 ld r4,STACKFRAMESIZE+56(r1)
@@ -326,38 +326,7 @@ err1; stb r0,0(r3)
326 dcbt r0,r8,0b01010 /* GO */ 326 dcbt r0,r8,0b01010 /* GO */
327.machine pop 327.machine pop
328 328
329 /* 329 beq cr1,.Lunwind_stack_nonvmx_copy
330 * We prefetch both the source and destination using enhanced touch
331 * instructions. We use a stream ID of 0 for the load side and
332 * 1 for the store side.
333 */
334 clrrdi r6,r4,7
335 clrrdi r9,r3,7
336 ori r9,r9,1 /* stream=1 */
337
338 srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
339 cmpldi cr1,r7,0x3FF
340 ble cr1,1f
341 li r7,0x3FF
3421: lis r0,0x0E00 /* depth=7 */
343 sldi r7,r7,7
344 or r7,r7,r0
345 ori r10,r7,1 /* stream=1 */
346
347 lis r8,0x8000 /* GO=1 */
348 clrldi r8,r8,32
349
350.machine push
351.machine "power4"
352 dcbt r0,r6,0b01000
353 dcbt r0,r7,0b01010
354 dcbtst r0,r9,0b01000
355 dcbtst r0,r10,0b01010
356 eieio
357 dcbt r0,r8,0b01010 /* GO */
358.machine pop
359
360 beq .Lunwind_stack_nonvmx_copy
361 330
362 /* 331 /*
363 * If source and destination are not relatively aligned we use a 332 * If source and destination are not relatively aligned we use a
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 0efdc51bc716..7ba6c96de778 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -222,7 +222,7 @@ _GLOBAL(memcpy_power7)
222 std r0,16(r1) 222 std r0,16(r1)
223 stdu r1,-STACKFRAMESIZE(r1) 223 stdu r1,-STACKFRAMESIZE(r1)
224 bl .enter_vmx_copy 224 bl .enter_vmx_copy
225 cmpwi r3,0 225 cmpwi cr1,r3,0
226 ld r0,STACKFRAMESIZE+16(r1) 226 ld r0,STACKFRAMESIZE+16(r1)
227 ld r3,STACKFRAMESIZE+48(r1) 227 ld r3,STACKFRAMESIZE+48(r1)
228 ld r4,STACKFRAMESIZE+56(r1) 228 ld r4,STACKFRAMESIZE+56(r1)
@@ -260,7 +260,7 @@ _GLOBAL(memcpy_power7)
260 dcbt r0,r8,0b01010 /* GO */ 260 dcbt r0,r8,0b01010 /* GO */
261.machine pop 261.machine pop
262 262
263 beq .Lunwind_stack_nonvmx_copy 263 beq cr1,.Lunwind_stack_nonvmx_copy
264 264
265 /* 265 /*
266 * If source and destination are not relatively aligned we use a 266 * If source and destination are not relatively aligned we use a
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index baaafde7d135..fbdad0e3929a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -469,6 +469,7 @@ void flush_dcache_icache_page(struct page *page)
469 __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT); 469 __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
470#endif 470#endif
471} 471}
472EXPORT_SYMBOL(flush_dcache_icache_page);
472 473
473void clear_user_page(void *page, unsigned long vaddr, struct page *pg) 474void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
474{ 475{
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 77b49ddda9d3..7cd2dbd6e4c4 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1431,7 +1431,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
1431 if (!event->hw.idx || is_limited_pmc(event->hw.idx)) 1431 if (!event->hw.idx || is_limited_pmc(event->hw.idx))
1432 continue; 1432 continue;
1433 val = read_pmc(event->hw.idx); 1433 val = read_pmc(event->hw.idx);
1434 if ((int)val < 0) { 1434 if (pmc_overflow(val)) {
1435 /* event has overflowed */ 1435 /* event has overflowed */
1436 found = 1; 1436 found = 1;
1437 record_and_restart(event, val, regs); 1437 record_and_restart(event, val, regs);
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index a7b2a600d0a4..c37f46136321 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -465,7 +465,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
465 iounmap(hose->cfg_data); 465 iounmap(hose->cfg_data);
466 iounmap(hose->cfg_addr); 466 iounmap(hose->cfg_addr);
467 pcibios_free_controller(hose); 467 pcibios_free_controller(hose);
468 return 0; 468 return -ENODEV;
469 } 469 }
470 470
471 setup_pci_cmd(hose); 471 setup_pci_cmd(hose);
@@ -827,6 +827,7 @@ struct device_node *fsl_pci_primary;
827 827
828void __devinit fsl_pci_init(void) 828void __devinit fsl_pci_init(void)
829{ 829{
830 int ret;
830 struct device_node *node; 831 struct device_node *node;
831 struct pci_controller *hose; 832 struct pci_controller *hose;
832 dma_addr_t max = 0xffffffff; 833 dma_addr_t max = 0xffffffff;
@@ -855,10 +856,12 @@ void __devinit fsl_pci_init(void)
855 if (!fsl_pci_primary) 856 if (!fsl_pci_primary)
856 fsl_pci_primary = node; 857 fsl_pci_primary = node;
857 858
858 fsl_add_bridge(node, fsl_pci_primary == node); 859 ret = fsl_add_bridge(node, fsl_pci_primary == node);
859 hose = pci_find_hose_for_OF_device(node); 860 if (ret == 0) {
860 max = min(max, hose->dma_window_base_cur + 861 hose = pci_find_hose_for_OF_device(node);
861 hose->dma_window_size); 862 max = min(max, hose->dma_window_base_cur +
863 hose->dma_window_size);
864 }
862 } 865 }
863 } 866 }
864 867
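
fsl_add_bridge() now reports failure as -ENODEV, and fsl_pci_init() only looks up the hose when the bridge really was added. The general shape of that fix, reduced to a stand-alone sketch with made-up names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct bridge { unsigned long dma_base, dma_size; };

/* Stand-in for fsl_add_bridge(): return 0 on success, -errno on failure. */
static int add_bridge(struct bridge **out, int should_fail)
{
	if (should_fail)
		return -ENODEV;		/* failure must not look like success */
	*out = malloc(sizeof(**out));
	if (!*out)
		return -ENOMEM;
	(*out)->dma_base = 0x80000000;
	(*out)->dma_size = 0x40000000;
	return 0;
}

int main(void)
{
	struct bridge *hose = NULL;
	int ret = add_bridge(&hose, 1);

	/* Mirror of the fsl_pci_init() fix: only touch the hose when the
	 * bridge was actually set up. */
	if (ret == 0)
		printf("dma window ends at %#lx\n",
		       hose->dma_base + hose->dma_size);
	else
		printf("add_bridge failed: %d\n", ret);
	free(hose);
	return 0;
}
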
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 483d8fa72e8b..e961f8c4a8f0 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -14,6 +14,9 @@
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/of_platform.h> 15#include <linux/of_platform.h>
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/err.h>
18#include <linux/export.h>
19#include <linux/slab.h>
17#include <asm/prom.h> 20#include <asm/prom.h>
18#include <asm/hw_irq.h> 21#include <asm/hw_irq.h>
19#include <asm/ppc-pci.h> 22#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index eab3492a45c5..9b49c65ee7a4 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -17,6 +17,7 @@
17#include <linux/reboot.h> 17#include <linux/reboot.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/kallsyms.h> 19#include <linux/kallsyms.h>
20#include <linux/kmsg_dump.h>
20#include <linux/cpumask.h> 21#include <linux/cpumask.h>
21#include <linux/export.h> 22#include <linux/export.h>
22#include <linux/sysrq.h> 23#include <linux/sysrq.h>
@@ -894,13 +895,13 @@ cmds(struct pt_regs *excp)
894#endif 895#endif
895 default: 896 default:
896 printf("Unrecognized command: "); 897 printf("Unrecognized command: ");
897 do { 898 do {
898 if (' ' < cmd && cmd <= '~') 899 if (' ' < cmd && cmd <= '~')
899 putchar(cmd); 900 putchar(cmd);
900 else 901 else
901 printf("\\x%x", cmd); 902 printf("\\x%x", cmd);
902 cmd = inchar(); 903 cmd = inchar();
903 } while (cmd != '\n'); 904 } while (cmd != '\n');
904 printf(" (type ? for help)\n"); 905 printf(" (type ? for help)\n");
905 break; 906 break;
906 } 907 }
@@ -1097,7 +1098,7 @@ static long check_bp_loc(unsigned long addr)
1097 return 1; 1098 return 1;
1098} 1099}
1099 1100
1100static char *breakpoint_help_string = 1101static char *breakpoint_help_string =
1101 "Breakpoint command usage:\n" 1102 "Breakpoint command usage:\n"
1102 "b show breakpoints\n" 1103 "b show breakpoints\n"
1103 "b <addr> [cnt] set breakpoint at given instr addr\n" 1104 "b <addr> [cnt] set breakpoint at given instr addr\n"
@@ -1193,7 +1194,7 @@ bpt_cmds(void)
1193 1194
1194 default: 1195 default:
1195 termch = cmd; 1196 termch = cmd;
1196 cmd = skipbl(); 1197 cmd = skipbl();
1197 if (cmd == '?') { 1198 if (cmd == '?') {
1198 printf(breakpoint_help_string); 1199 printf(breakpoint_help_string);
1199 break; 1200 break;
@@ -1359,7 +1360,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
1359 sp + REGS_OFFSET); 1360 sp + REGS_OFFSET);
1360 break; 1361 break;
1361 } 1362 }
1362 printf("--- Exception: %lx %s at ", regs.trap, 1363 printf("--- Exception: %lx %s at ", regs.trap,
1363 getvecname(TRAP(&regs))); 1364 getvecname(TRAP(&regs)));
1364 pc = regs.nip; 1365 pc = regs.nip;
1365 lr = regs.link; 1366 lr = regs.link;
@@ -1623,14 +1624,14 @@ static void super_regs(void)
1623 1624
1624 cmd = skipbl(); 1625 cmd = skipbl();
1625 if (cmd == '\n') { 1626 if (cmd == '\n') {
1626 unsigned long sp, toc; 1627 unsigned long sp, toc;
1627 asm("mr %0,1" : "=r" (sp) :); 1628 asm("mr %0,1" : "=r" (sp) :);
1628 asm("mr %0,2" : "=r" (toc) :); 1629 asm("mr %0,2" : "=r" (toc) :);
1629 1630
1630 printf("msr = "REG" sprg0= "REG"\n", 1631 printf("msr = "REG" sprg0= "REG"\n",
1631 mfmsr(), mfspr(SPRN_SPRG0)); 1632 mfmsr(), mfspr(SPRN_SPRG0));
1632 printf("pvr = "REG" sprg1= "REG"\n", 1633 printf("pvr = "REG" sprg1= "REG"\n",
1633 mfspr(SPRN_PVR), mfspr(SPRN_SPRG1)); 1634 mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
1634 printf("dec = "REG" sprg2= "REG"\n", 1635 printf("dec = "REG" sprg2= "REG"\n",
1635 mfspr(SPRN_DEC), mfspr(SPRN_SPRG2)); 1636 mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
1636 printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3)); 1637 printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
@@ -1783,7 +1784,7 @@ byterev(unsigned char *val, int size)
1783static int brev; 1784static int brev;
1784static int mnoread; 1785static int mnoread;
1785 1786
1786static char *memex_help_string = 1787static char *memex_help_string =
1787 "Memory examine command usage:\n" 1788 "Memory examine command usage:\n"
1788 "m [addr] [flags] examine/change memory\n" 1789 "m [addr] [flags] examine/change memory\n"
1789 " addr is optional. will start where left off.\n" 1790 " addr is optional. will start where left off.\n"
@@ -1798,7 +1799,7 @@ static char *memex_help_string =
1798 "NOTE: flags are saved as defaults\n" 1799 "NOTE: flags are saved as defaults\n"
1799 ""; 1800 "";
1800 1801
1801static char *memex_subcmd_help_string = 1802static char *memex_subcmd_help_string =
1802 "Memory examine subcommands:\n" 1803 "Memory examine subcommands:\n"
1803 " hexval write this val to current location\n" 1804 " hexval write this val to current location\n"
1804 " 'string' write chars from string to this location\n" 1805 " 'string' write chars from string to this location\n"
@@ -2064,7 +2065,7 @@ prdump(unsigned long adrs, long ndump)
2064 nr = mread(adrs, temp, r); 2065 nr = mread(adrs, temp, r);
2065 adrs += nr; 2066 adrs += nr;
2066 for (m = 0; m < r; ++m) { 2067 for (m = 0; m < r; ++m) {
2067 if ((m & (sizeof(long) - 1)) == 0 && m > 0) 2068 if ((m & (sizeof(long) - 1)) == 0 && m > 0)
2068 putchar(' '); 2069 putchar(' ');
2069 if (m < nr) 2070 if (m < nr)
2070 printf("%.2x", temp[m]); 2071 printf("%.2x", temp[m]);
@@ -2072,7 +2073,7 @@ prdump(unsigned long adrs, long ndump)
2072 printf("%s", fault_chars[fault_type]); 2073 printf("%s", fault_chars[fault_type]);
2073 } 2074 }
2074 for (; m < 16; ++m) { 2075 for (; m < 16; ++m) {
2075 if ((m & (sizeof(long) - 1)) == 0) 2076 if ((m & (sizeof(long) - 1)) == 0)
2076 putchar(' '); 2077 putchar(' ');
2077 printf(" "); 2078 printf(" ");
2078 } 2079 }
@@ -2148,45 +2149,28 @@ print_address(unsigned long addr)
2148void 2149void
2149dump_log_buf(void) 2150dump_log_buf(void)
2150{ 2151{
2151 const unsigned long size = 128; 2152 struct kmsg_dumper dumper = { .active = 1 };
2152 unsigned long end, addr; 2153 unsigned char buf[128];
2153 unsigned char buf[size + 1]; 2154 size_t len;
2154 2155
2155 addr = 0; 2156 if (setjmp(bus_error_jmp) != 0) {
2156 buf[size] = '\0'; 2157 printf("Error dumping printk buffer!\n");
2157 2158 return;
2158 if (setjmp(bus_error_jmp) != 0) { 2159 }
2159 printf("Unable to lookup symbol __log_buf!\n"); 2160
2160 return; 2161 catch_memory_errors = 1;
2161 } 2162 sync();
2162 2163
2163 catch_memory_errors = 1; 2164 kmsg_dump_rewind_nolock(&dumper);
2164 sync(); 2165 while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) {
2165 addr = kallsyms_lookup_name("__log_buf"); 2166 buf[len] = '\0';
2166 2167 printf("%s", buf);
2167 if (! addr) 2168 }
2168 printf("Symbol __log_buf not found!\n"); 2169
2169 else { 2170 sync();
2170 end = addr + (1 << CONFIG_LOG_BUF_SHIFT); 2171 /* wait a little while to see if we get a machine check */
2171 while (addr < end) { 2172 __delay(200);
2172 if (! mread(addr, buf, size)) { 2173 catch_memory_errors = 0;
2173 printf("Can't read memory at address 0x%lx\n", addr);
2174 break;
2175 }
2176
2177 printf("%s", buf);
2178
2179 if (strlen(buf) < size)
2180 break;
2181
2182 addr += size;
2183 }
2184 }
2185
2186 sync();
2187 /* wait a little while to see if we get a machine check */
2188 __delay(200);
2189 catch_memory_errors = 0;
2190} 2174}
2191 2175
2192/* 2176/*
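
The xmon hunk above replaces the raw __log_buf walk with the kmsg dumper line iterator. Below is a user-space analogue of that loop; the record array and helper names are toy stand-ins for the kernel interface, kept only to show the rewind-then-copy-one-bounded-line-at-a-time shape:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-ins for the kmsg dumper: a fixed list of records and a cursor. */
static const char *records[] = { "line one\n", "line two\n", "line three\n" };
struct dumper { size_t next; };

static void dump_rewind(struct dumper *d) { d->next = 0; }

static bool dump_get_line(struct dumper *d, char *buf, size_t size, size_t *len)
{
	if (d->next >= sizeof(records) / sizeof(records[0]))
		return false;
	*len = strlen(records[d->next]);
	if (*len >= size)
		*len = size - 1;	/* keep room for the terminator */
	memcpy(buf, records[d->next++], *len);
	return true;
}

int main(void)
{
	struct dumper dumper;
	char buf[128];
	size_t len;

	/* Same shape as the new dump_log_buf(): rewind, then copy out one
	 * bounded line at a time and terminate it before printing. */
	dump_rewind(&dumper);
	while (dump_get_line(&dumper, buf, sizeof(buf), &len)) {
		buf[len] = '\0';
		printf("%s", buf);
	}
	return 0;
}
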
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 32e8449640fa..9b94a160fe7f 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -180,7 +180,8 @@ extern char elf_platform[];
180#define ELF_PLATFORM (elf_platform) 180#define ELF_PLATFORM (elf_platform)
181 181
182#ifndef CONFIG_64BIT 182#ifndef CONFIG_64BIT
183#define SET_PERSONALITY(ex) set_personality(PER_LINUX) 183#define SET_PERSONALITY(ex) \
184 set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
184#else /* CONFIG_64BIT */ 185#else /* CONFIG_64BIT */
185#define SET_PERSONALITY(ex) \ 186#define SET_PERSONALITY(ex) \
186do { \ 187do { \
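
The 31-bit SET_PERSONALITY now keeps whatever personality flags live outside PER_MASK instead of resetting them on exec. A tiny sketch of that masking; the constant values are quoted for illustration only:

#include <stdio.h>

#define PER_MASK		0x00ff		/* bits selecting the base personality */
#define PER_LINUX		0x0000
#define ADDR_NO_RANDOMIZE	0x0040000	/* example flag outside PER_MASK */

int main(void)
{
	unsigned int personality = PER_LINUX | ADDR_NO_RANDOMIZE;

	/* Old behaviour: the flag bits are thrown away on exec. */
	unsigned int lost = PER_LINUX;

	/* Fixed behaviour: keep everything outside PER_MASK. */
	unsigned int kept = PER_LINUX | (personality & ~PER_MASK);

	printf("old: %#x  new: %#x\n", lost, kept);
	return 0;
}
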
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 7bcc14e395f0..bf2a2ad2f800 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15typedef unsigned long __kernel_size_t; 15typedef unsigned long __kernel_size_t;
16typedef long __kernel_ssize_t;
16#define __kernel_size_t __kernel_size_t 17#define __kernel_size_t __kernel_size_t
17 18
18typedef unsigned short __kernel_old_dev_t; 19typedef unsigned short __kernel_old_dev_t;
@@ -25,7 +26,6 @@ typedef unsigned short __kernel_mode_t;
25typedef unsigned short __kernel_ipc_pid_t; 26typedef unsigned short __kernel_ipc_pid_t;
26typedef unsigned short __kernel_uid_t; 27typedef unsigned short __kernel_uid_t;
27typedef unsigned short __kernel_gid_t; 28typedef unsigned short __kernel_gid_t;
28typedef int __kernel_ssize_t;
29typedef int __kernel_ptrdiff_t; 29typedef int __kernel_ptrdiff_t;
30 30
31#else /* __s390x__ */ 31#else /* __s390x__ */
@@ -35,7 +35,6 @@ typedef unsigned int __kernel_mode_t;
35typedef int __kernel_ipc_pid_t; 35typedef int __kernel_ipc_pid_t;
36typedef unsigned int __kernel_uid_t; 36typedef unsigned int __kernel_uid_t;
37typedef unsigned int __kernel_gid_t; 37typedef unsigned int __kernel_gid_t;
38typedef long __kernel_ssize_t;
39typedef long __kernel_ptrdiff_t; 38typedef long __kernel_ptrdiff_t;
40typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ 39typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
41 40
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index a0a8340daafa..ce26ac3cb162 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -44,6 +44,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
44} 44}
45 45
46static inline int smp_find_processor_id(int address) { return 0; } 46static inline int smp_find_processor_id(int address) { return 0; }
47static inline int smp_store_status(int cpu) { return 0; }
47static inline int smp_vcpu_scheduled(int cpu) { return 1; } 48static inline int smp_vcpu_scheduled(int cpu) { return 1; }
48static inline void smp_yield_cpu(int cpu) { } 49static inline void smp_yield_cpu(int cpu) { }
49static inline void smp_yield(void) { } 50static inline void smp_yield(void) { }
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index b0c5276861ec..682e9c210baa 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -27,6 +27,10 @@ ifeq ($(CONFIG_X86_32),y)
27 27
28 KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return 28 KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
29 29
30 # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
31 # with nonstandard options
32 KBUILD_CFLAGS += -fno-pic
33
30 # prevent gcc from keeping the stack 16 byte aligned 34 # prevent gcc from keeping the stack 16 byte aligned
31 KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2) 35 KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
32 36
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 5a747dd884db..f7535bedc33f 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -57,7 +57,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
57 -Wall -Wstrict-prototypes \ 57 -Wall -Wstrict-prototypes \
58 -march=i386 -mregparm=3 \ 58 -march=i386 -mregparm=3 \
59 -include $(srctree)/$(src)/code16gcc.h \ 59 -include $(srctree)/$(src)/code16gcc.h \
60 -fno-strict-aliasing -fomit-frame-pointer \ 60 -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
61 $(call cc-option, -ffreestanding) \ 61 $(call cc-option, -ffreestanding) \
62 $(call cc-option, -fno-toplevel-reorder,\ 62 $(call cc-option, -fno-toplevel-reorder,\
63 $(call cc-option, -fno-unit-at-a-time)) \ 63 $(call cc-option, -fno-unit-at-a-time)) \
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index b315a33867f2..33692eaabab5 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -12,8 +12,7 @@
12 * Simple spin lock operations. There are two variants, one clears IRQ's 12 * Simple spin lock operations. There are two variants, one clears IRQ's
13 * on the local processor, one does not. 13 * on the local processor, one does not.
14 * 14 *
15 * These are fair FIFO ticket locks, which are currently limited to 256 15 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
16 * CPUs.
17 * 16 *
18 * (the type definitions are in asm/spinlock_types.h) 17 * (the type definitions are in asm/spinlock_types.h)
19 */ 18 */
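
The comment now reflects that the ticket fields are 16 bits wide, so up to 2^16 CPUs can queue. For reference, a minimal user-space ticket lock in C11 atomics; this is a sketch of the idea, not the kernel's implementation:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal FIFO ticket lock: each locker takes the next ticket and spins
 * until "owner" reaches it.  With 16-bit fields up to 2^16 CPUs can queue. */
struct ticket_lock {
	atomic_ushort next;	/* next ticket to hand out */
	atomic_ushort owner;	/* ticket currently being served */
};

static void ticket_lock(struct ticket_lock *l)
{
	uint16_t me = atomic_fetch_add(&l->next, 1);
	while (atomic_load(&l->owner) != me)
		;	/* spin until our ticket comes up */
}

static void ticket_unlock(struct ticket_lock *l)
{
	atomic_fetch_add(&l->owner, 1);
}

int main(void)
{
	struct ticket_lock l = { 0, 0 };

	ticket_lock(&l);
	printf("holding ticket %u\n", (unsigned)atomic_load(&l.owner));
	ticket_unlock(&l);
	return 0;
}
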
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index afb7ff79a29f..ced4534baed5 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -165,7 +165,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
165#endif 165#endif
166 166
167#ifdef P6_NOP1 167#ifdef P6_NOP1
168static const unsigned char __initconst_or_module p6nops[] = 168static const unsigned char p6nops[] =
169{ 169{
170 P6_NOP1, 170 P6_NOP1,
171 P6_NOP2, 171 P6_NOP2,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index a6c64aaddf9a..c265593ec2cd 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1356,6 +1356,16 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
1356 if (!IO_APIC_IRQ(irq)) 1356 if (!IO_APIC_IRQ(irq))
1357 return; 1357 return;
1358 1358
1359 /*
1360 * For legacy irqs, cfg->domain starts with cpu 0. Now that IO-APIC
 1361 * can handle this irq and the apic driver is finalized at this point,
1362 * update the cfg->domain.
1363 */
1364 if (irq < legacy_pic->nr_legacy_irqs &&
1365 cpumask_equal(cfg->domain, cpumask_of(0)))
1366 apic->vector_allocation_domain(0, cfg->domain,
1367 apic->target_cpus());
1368
1359 if (assign_irq_vector(irq, cfg, apic->target_cpus())) 1369 if (assign_irq_vector(irq, cfg, apic->target_cpus()))
1360 return; 1370 return;
1361 1371
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 46d8786d655e..a5fbc3c5fccc 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -144,6 +144,8 @@ static int __init x86_xsave_setup(char *s)
144{ 144{
145 setup_clear_cpu_cap(X86_FEATURE_XSAVE); 145 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
146 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); 146 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
147 setup_clear_cpu_cap(X86_FEATURE_AVX);
148 setup_clear_cpu_cap(X86_FEATURE_AVX2);
147 return 1; 149 return 1;
148} 150}
149__setup("noxsave", x86_xsave_setup); 151__setup("noxsave", x86_xsave_setup);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 382366977d4c..7f2739e03e79 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1522,8 +1522,16 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
1522 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; 1522 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
1523 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask; 1523 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
1524 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; 1524 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
1525 /*
1526 * If PMU counter has PEBS enabled it is not enough to disable counter
1527 * on a guest entry since PEBS memory write can overshoot guest entry
1528 * and corrupt guest memory. Disabling PEBS solves the problem.
1529 */
1530 arr[1].msr = MSR_IA32_PEBS_ENABLE;
1531 arr[1].host = cpuc->pebs_enabled;
1532 arr[1].guest = 0;
1525 1533
1526 *nr = 1; 1534 *nr = 2;
1527 return arr; 1535 return arr;
1528} 1536}
1529 1537
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 7563fda9f033..0a5571080e74 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -796,7 +796,6 @@ static struct intel_uncore_type *nhm_msr_uncores[] = {
796 796
797DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); 797DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
798DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); 798DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
799DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63");
800DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); 799DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
801DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63"); 800DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
802 801
@@ -902,16 +901,21 @@ static struct attribute_group nhmex_uncore_cbox_format_group = {
902 .attrs = nhmex_uncore_cbox_formats_attr, 901 .attrs = nhmex_uncore_cbox_formats_attr,
903}; 902};
904 903
904/* msr offset for each instance of cbox */
905static unsigned nhmex_cbox_msr_offsets[] = {
906 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
907};
908
905static struct intel_uncore_type nhmex_uncore_cbox = { 909static struct intel_uncore_type nhmex_uncore_cbox = {
906 .name = "cbox", 910 .name = "cbox",
907 .num_counters = 6, 911 .num_counters = 6,
908 .num_boxes = 8, 912 .num_boxes = 10,
909 .perf_ctr_bits = 48, 913 .perf_ctr_bits = 48,
910 .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0, 914 .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
911 .perf_ctr = NHMEX_C0_MSR_PMON_CTR0, 915 .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
912 .event_mask = NHMEX_PMON_RAW_EVENT_MASK, 916 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
913 .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL, 917 .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
914 .msr_offset = NHMEX_C_MSR_OFFSET, 918 .msr_offsets = nhmex_cbox_msr_offsets,
915 .pair_ctr_ctl = 1, 919 .pair_ctr_ctl = 1,
916 .ops = &nhmex_uncore_ops, 920 .ops = &nhmex_uncore_ops,
917 .format_group = &nhmex_uncore_cbox_format_group 921 .format_group = &nhmex_uncore_cbox_format_group
@@ -1032,24 +1036,22 @@ static struct intel_uncore_type nhmex_uncore_bbox = {
1032 1036
1033static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) 1037static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1034{ 1038{
1035 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 1039 struct hw_perf_event *hwc = &event->hw;
1036 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; 1040 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1041 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1037 1042
1038 if (event->attr.config & NHMEX_S_PMON_MM_CFG_EN) { 1043 /* only TO_R_PROG_EV event uses the match/mask register */
1039 reg1->config = event->attr.config1; 1044 if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
1040 reg2->config = event->attr.config2; 1045 NHMEX_S_EVENT_TO_R_PROG_EV)
1041 } else { 1046 return 0;
1042 reg1->config = ~0ULL;
1043 reg2->config = ~0ULL;
1044 }
1045 1047
1046 if (box->pmu->pmu_idx == 0) 1048 if (box->pmu->pmu_idx == 0)
1047 reg1->reg = NHMEX_S0_MSR_MM_CFG; 1049 reg1->reg = NHMEX_S0_MSR_MM_CFG;
1048 else 1050 else
1049 reg1->reg = NHMEX_S1_MSR_MM_CFG; 1051 reg1->reg = NHMEX_S1_MSR_MM_CFG;
1050
1051 reg1->idx = 0; 1052 reg1->idx = 0;
1052 1053 reg1->config = event->attr.config1;
1054 reg2->config = event->attr.config2;
1053 return 0; 1055 return 0;
1054} 1056}
1055 1057
@@ -1059,8 +1061,8 @@ static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct per
1059 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 1061 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1060 struct hw_perf_event_extra *reg2 = &hwc->branch_reg; 1062 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1061 1063
1062 wrmsrl(reg1->reg, 0); 1064 if (reg1->idx != EXTRA_REG_NONE) {
1063 if (reg1->config != ~0ULL || reg2->config != ~0ULL) { 1065 wrmsrl(reg1->reg, 0);
1064 wrmsrl(reg1->reg + 1, reg1->config); 1066 wrmsrl(reg1->reg + 1, reg1->config);
1065 wrmsrl(reg1->reg + 2, reg2->config); 1067 wrmsrl(reg1->reg + 2, reg2->config);
1066 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); 1068 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
@@ -1074,7 +1076,6 @@ static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
1074 &format_attr_edge.attr, 1076 &format_attr_edge.attr,
1075 &format_attr_inv.attr, 1077 &format_attr_inv.attr,
1076 &format_attr_thresh8.attr, 1078 &format_attr_thresh8.attr,
1077 &format_attr_mm_cfg.attr,
1078 &format_attr_match.attr, 1079 &format_attr_match.attr,
1079 &format_attr_mask.attr, 1080 &format_attr_mask.attr,
1080 NULL, 1081 NULL,
@@ -1142,6 +1143,9 @@ static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
1142 EVENT_EXTRA_END 1143 EVENT_EXTRA_END
1143}; 1144};
1144 1145
1146/* Nehalem-EX or Westmere-EX ? */
1147bool uncore_nhmex;
1148
1145static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) 1149static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
1146{ 1150{
1147 struct intel_uncore_extra_reg *er; 1151 struct intel_uncore_extra_reg *er;
@@ -1171,18 +1175,29 @@ static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64
1171 return false; 1175 return false;
1172 1176
1173 /* mask of the shared fields */ 1177 /* mask of the shared fields */
1174 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; 1178 if (uncore_nhmex)
1179 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
1180 else
1181 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
1175 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; 1182 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1176 1183
1177 raw_spin_lock_irqsave(&er->lock, flags); 1184 raw_spin_lock_irqsave(&er->lock, flags);
1178 /* add mask of the non-shared field if it's in use */ 1185 /* add mask of the non-shared field if it's in use */
1179 if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) 1186 if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
1180 mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); 1187 if (uncore_nhmex)
1188 mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1189 else
1190 mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1191 }
1181 1192
1182 if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) { 1193 if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
1183 atomic_add(1 << (idx * 8), &er->ref); 1194 atomic_add(1 << (idx * 8), &er->ref);
1184 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | 1195 if (uncore_nhmex)
1185 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); 1196 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
1197 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1198 else
1199 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
1200 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1186 er->config &= ~mask; 1201 er->config &= ~mask;
1187 er->config |= (config & mask); 1202 er->config |= (config & mask);
1188 ret = true; 1203 ret = true;
@@ -1216,7 +1231,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
1216 1231
1217 /* get the non-shared control bits and shift them */ 1232 /* get the non-shared control bits and shift them */
1218 idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; 1233 idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1219 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); 1234 if (uncore_nhmex)
1235 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1236 else
1237 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1220 if (new_idx > orig_idx) { 1238 if (new_idx > orig_idx) {
1221 idx = new_idx - orig_idx; 1239 idx = new_idx - orig_idx;
1222 config <<= 3 * idx; 1240 config <<= 3 * idx;
@@ -1226,6 +1244,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
1226 } 1244 }
1227 1245
1228 /* add the shared control bits back */ 1246 /* add the shared control bits back */
1247 if (uncore_nhmex)
1248 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1249 else
1250 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1229 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; 1251 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1230 if (modify) { 1252 if (modify) {
1231 /* adjust the main event selector */ 1253 /* adjust the main event selector */
@@ -1264,7 +1286,8 @@ again:
1264 } 1286 }
1265 1287
1266 /* for the match/mask registers */ 1288 /* for the match/mask registers */
1267 if ((uncore_box_is_fake(box) || !reg2->alloc) && 1289 if (reg2->idx != EXTRA_REG_NONE &&
1290 (uncore_box_is_fake(box) || !reg2->alloc) &&
1268 !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config)) 1291 !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
1269 goto fail; 1292 goto fail;
1270 1293
@@ -1278,7 +1301,8 @@ again:
1278 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) 1301 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
1279 nhmex_mbox_alter_er(event, idx[0], true); 1302 nhmex_mbox_alter_er(event, idx[0], true);
1280 reg1->alloc |= alloc; 1303 reg1->alloc |= alloc;
1281 reg2->alloc = 1; 1304 if (reg2->idx != EXTRA_REG_NONE)
1305 reg2->alloc = 1;
1282 } 1306 }
1283 return NULL; 1307 return NULL;
1284fail: 1308fail:
@@ -1342,9 +1366,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event
1342 struct extra_reg *er; 1366 struct extra_reg *er;
1343 unsigned msr; 1367 unsigned msr;
1344 int reg_idx = 0; 1368 int reg_idx = 0;
1345
1346 if (WARN_ON_ONCE(reg1->idx != -1))
1347 return -EINVAL;
1348 /* 1369 /*
1349 * The mbox events may require 2 extra MSRs at the most. But only 1370 * The mbox events may require 2 extra MSRs at the most. But only
1350 * the lower 32 bits in these MSRs are significant, so we can use 1371 * the lower 32 bits in these MSRs are significant, so we can use
@@ -1355,11 +1376,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event
1355 continue; 1376 continue;
1356 if (event->attr.config1 & ~er->valid_mask) 1377 if (event->attr.config1 & ~er->valid_mask)
1357 return -EINVAL; 1378 return -EINVAL;
1358 if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) ||
1359 er->idx == __BITS_VALUE(reg1->idx, 1, 8))
1360 continue;
1361 if (WARN_ON_ONCE(reg_idx >= 2))
1362 return -EINVAL;
1363 1379
1364 msr = er->msr + type->msr_offset * box->pmu->pmu_idx; 1380 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
1365 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff)) 1381 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
@@ -1368,6 +1384,8 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event
1368 /* always use the 32~63 bits to pass the PLD config */ 1384 /* always use the 32~63 bits to pass the PLD config */
1369 if (er->idx == EXTRA_REG_NHMEX_M_PLD) 1385 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
1370 reg_idx = 1; 1386 reg_idx = 1;
1387 else if (WARN_ON_ONCE(reg_idx > 0))
1388 return -EINVAL;
1371 1389
1372 reg1->idx &= ~(0xff << (reg_idx * 8)); 1390 reg1->idx &= ~(0xff << (reg_idx * 8));
1373 reg1->reg &= ~(0xffff << (reg_idx * 16)); 1391 reg1->reg &= ~(0xffff << (reg_idx * 16));
@@ -1376,17 +1394,21 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event
1376 reg1->config = event->attr.config1; 1394 reg1->config = event->attr.config1;
1377 reg_idx++; 1395 reg_idx++;
1378 } 1396 }
1379 /* use config2 to pass the filter config */ 1397 /*
1380 reg2->idx = EXTRA_REG_NHMEX_M_FILTER; 1398 * The mbox only provides ability to perform address matching
1381 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) 1399 * for the PLD events.
1382 reg2->config = event->attr.config2; 1400 */
1383 else 1401 if (reg_idx == 2) {
1384 reg2->config = ~0ULL; 1402 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
1385 if (box->pmu->pmu_idx == 0) 1403 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
1386 reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; 1404 reg2->config = event->attr.config2;
1387 else 1405 else
1388 reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; 1406 reg2->config = ~0ULL;
1389 1407 if (box->pmu->pmu_idx == 0)
1408 reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
1409 else
1410 reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
1411 }
1390 return 0; 1412 return 0;
1391} 1413}
1392 1414
@@ -1422,34 +1444,36 @@ static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct per
1422 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), 1444 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
1423 nhmex_mbox_shared_reg_config(box, idx)); 1445 nhmex_mbox_shared_reg_config(box, idx));
1424 1446
1425 wrmsrl(reg2->reg, 0); 1447 if (reg2->idx != EXTRA_REG_NONE) {
1426 if (reg2->config != ~0ULL) { 1448 wrmsrl(reg2->reg, 0);
1427 wrmsrl(reg2->reg + 1, 1449 if (reg2->config != ~0ULL) {
1428 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); 1450 wrmsrl(reg2->reg + 1,
1429 wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & 1451 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
1430 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); 1452 wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
1431 wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); 1453 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
1454 wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
1455 }
1432 } 1456 }
1433 1457
1434 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); 1458 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1435} 1459}
1436 1460
1437DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); 1461DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
1438DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); 1462DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
1439DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); 1463DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
1440DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); 1464DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
1441DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); 1465DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
1442DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); 1466DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
1443DEFINE_UNCORE_FORMAT_ATTR(filter_cfg, filter_cfg, "config2:63"); 1467DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
1444DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); 1468DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
1445DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); 1469DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
1446DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); 1470DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
1447DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); 1471DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
1448DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); 1472DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
1449DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); 1473DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
1450DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); 1474DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
1451DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); 1475DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
1452DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63"); 1476DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
1453 1477
1454static struct attribute *nhmex_uncore_mbox_formats_attr[] = { 1478static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
1455 &format_attr_count_mode.attr, 1479 &format_attr_count_mode.attr,
@@ -1458,7 +1482,7 @@ static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
1458 &format_attr_flag_mode.attr, 1482 &format_attr_flag_mode.attr,
1459 &format_attr_inc_sel.attr, 1483 &format_attr_inc_sel.attr,
1460 &format_attr_set_flag_sel.attr, 1484 &format_attr_set_flag_sel.attr,
1461 &format_attr_filter_cfg.attr, 1485 &format_attr_filter_cfg_en.attr,
1462 &format_attr_filter_match.attr, 1486 &format_attr_filter_match.attr,
1463 &format_attr_filter_mask.attr, 1487 &format_attr_filter_mask.attr,
1464 &format_attr_dsp.attr, 1488 &format_attr_dsp.attr,
@@ -1482,6 +1506,12 @@ static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
1482 { /* end: all zeroes */ }, 1506 { /* end: all zeroes */ },
1483}; 1507};
1484 1508
1509static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
1510 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
1511 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
1512 { /* end: all zeroes */ },
1513};
1514
1485static struct intel_uncore_ops nhmex_uncore_mbox_ops = { 1515static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
1486 NHMEX_UNCORE_OPS_COMMON_INIT(), 1516 NHMEX_UNCORE_OPS_COMMON_INIT(),
1487 .enable_event = nhmex_mbox_msr_enable_event, 1517 .enable_event = nhmex_mbox_msr_enable_event,
@@ -1513,7 +1543,7 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
1513 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 1543 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1514 int port; 1544 int port;
1515 1545
1516 /* adjust the main event selector */ 1546 /* adjust the main event selector and extra register index */
1517 if (reg1->idx % 2) { 1547 if (reg1->idx % 2) {
1518 reg1->idx--; 1548 reg1->idx--;
1519 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; 1549 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
@@ -1522,29 +1552,17 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
1522 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; 1552 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1523 } 1553 }
1524 1554
1525 /* adjust address or config of extra register */ 1555 /* adjust extra register config */
1526 port = reg1->idx / 6 + box->pmu->pmu_idx * 4; 1556 port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
1527 switch (reg1->idx % 6) { 1557 switch (reg1->idx % 6) {
1528 case 0:
1529 reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
1530 break;
1531 case 1:
1532 reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
1533 break;
1534 case 2: 1558 case 2:
1535 /* the 8~15 bits to the 0~7 bits */ 1559 /* shift the 8~15 bits to the 0~7 bits */
1536 reg1->config >>= 8; 1560 reg1->config >>= 8;
1537 break; 1561 break;
1538 case 3: 1562 case 3:
1539 /* the 0~7 bits to the 8~15 bits */ 1563 /* shift the 0~7 bits to the 8~15 bits */
1540 reg1->config <<= 8; 1564 reg1->config <<= 8;
1541 break; 1565 break;
1542 case 4:
1543 reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
1544 break;
1545 case 5:
1546 reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
1547 break;
1548 }; 1566 };
1549} 1567}
1550 1568
@@ -1671,7 +1689,7 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event
1671 struct hw_perf_event *hwc = &event->hw; 1689 struct hw_perf_event *hwc = &event->hw;
1672 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 1690 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1673 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; 1691 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1674 int port, idx; 1692 int idx;
1675 1693
1676 idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> 1694 idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
1677 NHMEX_R_PMON_CTL_EV_SEL_SHIFT; 1695 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
@@ -1681,27 +1699,11 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event
1681 reg1->idx = idx; 1699 reg1->idx = idx;
1682 reg1->config = event->attr.config1; 1700 reg1->config = event->attr.config1;
1683 1701
1684 port = idx / 6 + box->pmu->pmu_idx * 4; 1702 switch (idx % 6) {
1685 idx %= 6;
1686 switch (idx) {
1687 case 0:
1688 reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
1689 break;
1690 case 1:
1691 reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
1692 break;
1693 case 2:
1694 case 3:
1695 reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port);
1696 break;
1697 case 4: 1703 case 4:
1698 case 5: 1704 case 5:
1699 if (idx == 4)
1700 reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
1701 else
1702 reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
1703 reg2->config = event->attr.config2;
1704 hwc->config |= event->attr.config & (~0ULL << 32); 1705 hwc->config |= event->attr.config & (~0ULL << 32);
1706 reg2->config = event->attr.config2;
1705 break; 1707 break;
1706 }; 1708 };
1707 return 0; 1709 return 0;
@@ -1727,28 +1729,34 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per
1727 struct hw_perf_event *hwc = &event->hw; 1729 struct hw_perf_event *hwc = &event->hw;
1728 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 1730 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1729 struct hw_perf_event_extra *reg2 = &hwc->branch_reg; 1731 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1730 int idx, er_idx; 1732 int idx, port;
1731 1733
1732 idx = reg1->idx % 6; 1734 idx = reg1->idx;
1733 er_idx = idx; 1735 port = idx / 6 + box->pmu->pmu_idx * 4;
1734 if (er_idx > 2)
1735 er_idx--;
1736 er_idx += (reg1->idx / 6) * 5;
1737 1736
1738 switch (idx) { 1737 switch (idx % 6) {
1739 case 0: 1738 case 0:
1739 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
1740 break;
1740 case 1: 1741 case 1:
1741 wrmsrl(reg1->reg, reg1->config); 1742 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
1742 break; 1743 break;
1743 case 2: 1744 case 2:
1744 case 3: 1745 case 3:
1745 wrmsrl(reg1->reg, nhmex_rbox_shared_reg_config(box, er_idx)); 1746 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
1747 nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5));
1746 break; 1748 break;
1747 case 4: 1749 case 4:
1750 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
1751 hwc->config >> 32);
1752 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
1753 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
1754 break;
1748 case 5: 1755 case 5:
1749 wrmsrl(reg1->reg, reg1->config); 1756 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
1750 wrmsrl(reg1->reg + 1, hwc->config >> 32); 1757 hwc->config >> 32);
1751 wrmsrl(reg1->reg + 2, reg2->config); 1758 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
1759 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
1752 break; 1760 break;
1753 }; 1761 };
1754 1762
@@ -1756,8 +1764,8 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per
1756 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); 1764 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
1757} 1765}
1758 1766
1759DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63"); 1767DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
1760DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63"); 1768DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
1761DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63"); 1769DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
1762DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15"); 1770DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
1763DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31"); 1771DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
@@ -2303,6 +2311,7 @@ int uncore_pmu_event_init(struct perf_event *event)
2303 event->hw.idx = -1; 2311 event->hw.idx = -1;
2304 event->hw.last_tag = ~0ULL; 2312 event->hw.last_tag = ~0ULL;
2305 event->hw.extra_reg.idx = EXTRA_REG_NONE; 2313 event->hw.extra_reg.idx = EXTRA_REG_NONE;
2314 event->hw.branch_reg.idx = EXTRA_REG_NONE;
2306 2315
2307 if (event->attr.config == UNCORE_FIXED_EVENT) { 2316 if (event->attr.config == UNCORE_FIXED_EVENT) {
2308 /* no fixed counter */ 2317 /* no fixed counter */
@@ -2373,7 +2382,7 @@ static void __init uncore_type_exit(struct intel_uncore_type *type)
2373 type->attr_groups[1] = NULL; 2382 type->attr_groups[1] = NULL;
2374} 2383}
2375 2384
2376static void uncore_types_exit(struct intel_uncore_type **types) 2385static void __init uncore_types_exit(struct intel_uncore_type **types)
2377{ 2386{
2378 int i; 2387 int i;
2379 for (i = 0; types[i]; i++) 2388 for (i = 0; types[i]; i++)
@@ -2814,7 +2823,13 @@ static int __init uncore_cpu_init(void)
2814 snbep_uncore_cbox.num_boxes = max_cores; 2823 snbep_uncore_cbox.num_boxes = max_cores;
2815 msr_uncores = snbep_msr_uncores; 2824 msr_uncores = snbep_msr_uncores;
2816 break; 2825 break;
2817 case 46: 2826 case 46: /* Nehalem-EX */
2827 uncore_nhmex = true;
2828 case 47: /* Westmere-EX aka. Xeon E7 */
2829 if (!uncore_nhmex)
2830 nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
2831 if (nhmex_uncore_cbox.num_boxes > max_cores)
2832 nhmex_uncore_cbox.num_boxes = max_cores;
2818 msr_uncores = nhmex_msr_uncores; 2833 msr_uncores = nhmex_msr_uncores;
2819 break; 2834 break;
2820 default: 2835 default:
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index c9e5dc56630a..5b81c1856aac 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -230,6 +230,7 @@
230#define NHMEX_S1_MSR_MASK 0xe5a 230#define NHMEX_S1_MSR_MASK 0xe5a
231 231
232#define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63) 232#define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63)
233#define NHMEX_S_EVENT_TO_R_PROG_EV 0
233 234
234/* NHM-EX Mbox */ 235/* NHM-EX Mbox */
235#define NHMEX_M0_MSR_GLOBAL_CTL 0xca0 236#define NHMEX_M0_MSR_GLOBAL_CTL 0xca0
@@ -275,18 +276,12 @@
275 NHMEX_M_PMON_CTL_INC_SEL_MASK | \ 276 NHMEX_M_PMON_CTL_INC_SEL_MASK | \
276 NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) 277 NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)
277 278
278 279#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23))
279#define NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK 0x1f
280#define NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK (0x7 << 5)
281#define NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK (0x7 << 8)
282#define NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR (1 << 23)
283#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK \
284 (NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK | \
285 NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK | \
286 NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK | \
287 NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR)
288#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n))) 280#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n)))
289 281
282#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24))
283#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (12 + 3 * (n)))
284
290/* 285/*
291 * use the 9~13 bits to select event If the 7th bit is not set, 286 * use the 9~13 bits to select event If the 7th bit is not set,
292 * otherwise use the 19~21 bits to select event. 287 * otherwise use the 19~21 bits to select event.
@@ -368,6 +363,7 @@ struct intel_uncore_type {
368 unsigned num_shared_regs:8; 363 unsigned num_shared_regs:8;
369 unsigned single_fixed:1; 364 unsigned single_fixed:1;
370 unsigned pair_ctr_ctl:1; 365 unsigned pair_ctr_ctl:1;
366 unsigned *msr_offsets;
371 struct event_constraint unconstrainted; 367 struct event_constraint unconstrainted;
372 struct event_constraint *constraints; 368 struct event_constraint *constraints;
373 struct intel_uncore_pmu *pmus; 369 struct intel_uncore_pmu *pmus;
@@ -485,29 +481,31 @@ unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
485 return idx * 8 + box->pmu->type->perf_ctr; 481 return idx * 8 + box->pmu->type->perf_ctr;
486} 482}
487 483
488static inline 484static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
489unsigned uncore_msr_box_ctl(struct intel_uncore_box *box) 485{
486 struct intel_uncore_pmu *pmu = box->pmu;
487 return pmu->type->msr_offsets ?
488 pmu->type->msr_offsets[pmu->pmu_idx] :
489 pmu->type->msr_offset * pmu->pmu_idx;
490}
491
492static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
490{ 493{
491 if (!box->pmu->type->box_ctl) 494 if (!box->pmu->type->box_ctl)
492 return 0; 495 return 0;
493 return box->pmu->type->box_ctl + 496 return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
494 box->pmu->type->msr_offset * box->pmu->pmu_idx;
495} 497}
496 498
497static inline 499static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
498unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
499{ 500{
500 if (!box->pmu->type->fixed_ctl) 501 if (!box->pmu->type->fixed_ctl)
501 return 0; 502 return 0;
502 return box->pmu->type->fixed_ctl + 503 return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
503 box->pmu->type->msr_offset * box->pmu->pmu_idx;
504} 504}
505 505
506static inline 506static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
507unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
508{ 507{
509 return box->pmu->type->fixed_ctr + 508 return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
510 box->pmu->type->msr_offset * box->pmu->pmu_idx;
511} 509}
512 510
513static inline 511static inline
@@ -515,7 +513,7 @@ unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
515{ 513{
516 return box->pmu->type->event_ctl + 514 return box->pmu->type->event_ctl +
517 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + 515 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
518 box->pmu->type->msr_offset * box->pmu->pmu_idx; 516 uncore_msr_box_offset(box);
519} 517}
520 518
521static inline 519static inline
@@ -523,7 +521,7 @@ unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
523{ 521{
524 return box->pmu->type->perf_ctr + 522 return box->pmu->type->perf_ctr +
525 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + 523 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
526 box->pmu->type->msr_offset * box->pmu->pmu_idx; 524 uncore_msr_box_offset(box);
527} 525}
528 526
529static inline 527static inline
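
uncore_msr_box_offset() is introduced so box register addresses can come from a per-instance offset table (the ten NHM-EX C-boxes are not evenly spaced), with the old uniform stride as the fallback. A stand-alone sketch of that selection, with invented offsets:

#include <stdio.h>

/* Hypothetical mirror of the lookup added in uncore_msr_box_offset():
 * use a per-instance table when one is provided, otherwise a fixed stride. */
struct pmu_type {
	const unsigned *msr_offsets;	/* optional per-box offsets */
	unsigned msr_offset;		/* uniform stride fallback */
};

static unsigned box_offset(const struct pmu_type *t, unsigned idx)
{
	return t->msr_offsets ? t->msr_offsets[idx] : t->msr_offset * idx;
}

int main(void)
{
	static const unsigned cbox_offsets[] = { 0x0, 0x80, 0x40, 0xc0 };
	struct pmu_type irregular = { cbox_offsets, 0 };
	struct pmu_type regular = { NULL, 0x10 };

	printf("irregular box 2 -> %#x\n", box_offset(&irregular, 2)); /* 0x40 */
	printf("regular   box 2 -> %#x\n", box_offset(&regular, 2));   /* 0x20 */
	return 0;
}
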
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 7ad683d78645..d44f7829968e 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -270,7 +270,7 @@ void fixup_irqs(void)
270 270
271 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { 271 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
272 break_affinity = 1; 272 break_affinity = 1;
273 affinity = cpu_all_mask; 273 affinity = cpu_online_mask;
274 } 274 }
275 275
276 chip = irq_data_get_irq_chip(data); 276 chip = irq_data_get_irq_chip(data);
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 8a2ce8fd41c0..82746f942cd8 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -143,11 +143,12 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
143 unsigned int *current_size) 143 unsigned int *current_size)
144{ 144{
145 struct microcode_header_amd *mc_hdr; 145 struct microcode_header_amd *mc_hdr;
146 unsigned int actual_size; 146 unsigned int actual_size, patch_size;
147 u16 equiv_cpu_id; 147 u16 equiv_cpu_id;
148 148
149 /* size of the current patch we're staring at */ 149 /* size of the current patch we're staring at */
150 *current_size = *(u32 *)(ucode_ptr + 4) + SECTION_HDR_SIZE; 150 patch_size = *(u32 *)(ucode_ptr + 4);
151 *current_size = patch_size + SECTION_HDR_SIZE;
151 152
152 equiv_cpu_id = find_equiv_id(); 153 equiv_cpu_id = find_equiv_id();
153 if (!equiv_cpu_id) 154 if (!equiv_cpu_id)
@@ -174,7 +175,7 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
174 /* 175 /*
175 * now that the header looks sane, verify its size 176 * now that the header looks sane, verify its size
176 */ 177 */
177 actual_size = verify_ucode_size(cpu, *current_size, leftover_size); 178 actual_size = verify_ucode_size(cpu, patch_size, leftover_size);
178 if (!actual_size) 179 if (!actual_size)
179 return 0; 180 return 0;
180 181
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 97d9a9914ba8..a3b57a27be88 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -475,13 +475,26 @@ register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
475 return address_mask(ctxt, reg); 475 return address_mask(ctxt, reg);
476} 476}
477 477
478static void masked_increment(ulong *reg, ulong mask, int inc)
479{
480 assign_masked(reg, *reg + inc, mask);
481}
482
478static inline void 483static inline void
479register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc) 484register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
480{ 485{
486 ulong mask;
487
481 if (ctxt->ad_bytes == sizeof(unsigned long)) 488 if (ctxt->ad_bytes == sizeof(unsigned long))
482 *reg += inc; 489 mask = ~0UL;
483 else 490 else
484 *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt)); 491 mask = ad_mask(ctxt);
492 masked_increment(reg, mask, inc);
493}
494
495static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
496{
497 masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);
485} 498}
486 499
487static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) 500static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -1522,8 +1535,8 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1522{ 1535{
1523 struct segmented_address addr; 1536 struct segmented_address addr;
1524 1537
1525 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes); 1538 rsp_increment(ctxt, -bytes);
1526 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); 1539 addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
1527 addr.seg = VCPU_SREG_SS; 1540 addr.seg = VCPU_SREG_SS;
1528 1541
1529 return segmented_write(ctxt, addr, data, bytes); 1542 return segmented_write(ctxt, addr, data, bytes);
@@ -1542,13 +1555,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1542 int rc; 1555 int rc;
1543 struct segmented_address addr; 1556 struct segmented_address addr;
1544 1557
1545 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); 1558 addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
1546 addr.seg = VCPU_SREG_SS; 1559 addr.seg = VCPU_SREG_SS;
1547 rc = segmented_read(ctxt, addr, dest, len); 1560 rc = segmented_read(ctxt, addr, dest, len);
1548 if (rc != X86EMUL_CONTINUE) 1561 if (rc != X86EMUL_CONTINUE)
1549 return rc; 1562 return rc;
1550 1563
1551 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len); 1564 rsp_increment(ctxt, len);
1552 return rc; 1565 return rc;
1553} 1566}
1554 1567
@@ -1688,8 +1701,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
1688 1701
1689 while (reg >= VCPU_REGS_RAX) { 1702 while (reg >= VCPU_REGS_RAX) {
1690 if (reg == VCPU_REGS_RSP) { 1703 if (reg == VCPU_REGS_RSP) {
1691 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], 1704 rsp_increment(ctxt, ctxt->op_bytes);
1692 ctxt->op_bytes);
1693 --reg; 1705 --reg;
1694 } 1706 }
1695 1707
@@ -2825,7 +2837,7 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2825 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); 2837 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
2826 if (rc != X86EMUL_CONTINUE) 2838 if (rc != X86EMUL_CONTINUE)
2827 return rc; 2839 return rc;
2828 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val); 2840 rsp_increment(ctxt, ctxt->src.val);
2829 return X86EMUL_CONTINUE; 2841 return X86EMUL_CONTINUE;
2830} 2842}
2831 2843
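
All stack-pointer updates in the emulator now go through masked_increment(), so only the bits inside the current stack address-size mask change. The arithmetic, lifted into a user-space sketch with an illustrative 16-bit mask:

#include <stdio.h>

typedef unsigned long ulong;

/* Same arithmetic as the emulator's assign_masked()/masked_increment():
 * bits outside the mask are preserved, bits inside wrap within the mask. */
static void masked_increment(ulong *reg, ulong mask, long inc)
{
	*reg = (*reg & ~mask) | ((*reg + inc) & mask);
}

int main(void)
{
	ulong rsp = 0x123400010000UL;	/* upper bits must survive a 16-bit SP */

	masked_increment(&rsp, 0xffffUL, -8);	/* push 8 bytes with a 16-bit mask */
	printf("rsp = %#lx\n", rsp);		/* 0x12340001fff8: low 16 bits wrapped */
	return 0;
}
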
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 01ca00423938..7fbd0d273ea8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4113,16 +4113,21 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
4113 LIST_HEAD(invalid_list); 4113 LIST_HEAD(invalid_list);
4114 4114
4115 /* 4115 /*
4116 * Never scan more than sc->nr_to_scan VM instances.
4117 * Will not hit this condition practically since we do not try
4118 * to shrink more than one VM and it is very unlikely to see
4119 * !n_used_mmu_pages so many times.
4120 */
4121 if (!nr_to_scan--)
4122 break;
4123 /*
4116 * n_used_mmu_pages is accessed without holding kvm->mmu_lock 4124 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
4117 * here. We may skip a VM instance errorneosly, but we do not 4125 * here. We may skip a VM instance errorneosly, but we do not
4118 * want to shrink a VM that only started to populate its MMU 4126 * want to shrink a VM that only started to populate its MMU
4119 * anyway. 4127 * anyway.
4120 */ 4128 */
4121 if (kvm->arch.n_used_mmu_pages > 0) { 4129 if (!kvm->arch.n_used_mmu_pages)
4122 if (!nr_to_scan--)
4123 break;
4124 continue; 4130 continue;
4125 }
4126 4131
4127 idx = srcu_read_lock(&kvm->srcu); 4132 idx = srcu_read_lock(&kvm->srcu);
4128 spin_lock(&kvm->mmu_lock); 4133 spin_lock(&kvm->mmu_lock);
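
mmu_shrink() now charges its nr_to_scan budget for every VM it visits, so a run of VMs with no used MMU pages can no longer make the walk unbounded. The same budget pattern, reduced to a plain array walk with invented names:

#include <stdio.h>

struct vm { int used_pages; };

/* Visit at most nr_to_scan entries, skipping empty ones, and return the
 * index of the first shrinkable entry (or -1).  The budget is charged per
 * visited entry, mirroring the reordered check in mmu_shrink(). */
static int pick_victim(const struct vm *vms, int n, int nr_to_scan)
{
	for (int i = 0; i < n; i++) {
		if (!nr_to_scan--)
			break;			/* budget exhausted */
		if (!vms[i].used_pages)
			continue;		/* nothing to shrink here */
		return i;
	}
	return -1;
}

int main(void)
{
	struct vm vms[] = { {0}, {0}, {42}, {7} };

	printf("victim with budget 4: %d\n", pick_victim(vms, 4, 4)); /* 2 */
	printf("victim with budget 2: %d\n", pick_victim(vms, 4, 2)); /* -1 */
	return 0;
}
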
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 42bce48f6928..148ed666e311 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
806 * kvm-specific. Those are put in the beginning of the list. 806 * kvm-specific. Those are put in the beginning of the list.
807 */ 807 */
808 808
809#define KVM_SAVE_MSRS_BEGIN 9 809#define KVM_SAVE_MSRS_BEGIN 10
810static u32 msrs_to_save[] = { 810static u32 msrs_to_save[] = {
811 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, 811 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
812 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, 812 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
@@ -2000,6 +2000,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2000 case MSR_KVM_STEAL_TIME: 2000 case MSR_KVM_STEAL_TIME:
2001 data = vcpu->arch.st.msr_val; 2001 data = vcpu->arch.st.msr_val;
2002 break; 2002 break;
2003 case MSR_KVM_PV_EOI_EN:
2004 data = vcpu->arch.pv_eoi.msr_val;
2005 break;
2003 case MSR_IA32_P5_MC_ADDR: 2006 case MSR_IA32_P5_MC_ADDR:
2004 case MSR_IA32_P5_MC_TYPE: 2007 case MSR_IA32_P5_MC_TYPE:
2005 case MSR_IA32_MCG_CAP: 2008 case MSR_IA32_MCG_CAP:
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index f6679a7fb8ca..b91e48512425 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
56} 56}
57 57
58/* 58/*
59 * search for a shareable pmd page for hugetlb. 59 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
60 * and returns the corresponding pte. While this is not necessary for the
61 * !shared pmd case because we can allocate the pmd later as well, it makes the
62 * code much cleaner. pmd allocation is essential for the shared case because
63 * pud has to be populated inside the same i_mmap_mutex section - otherwise
64 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
65 * bad pmd for sharing.
60 */ 66 */
61static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 67static pte_t *
68huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
62{ 69{
63 struct vm_area_struct *vma = find_vma(mm, addr); 70 struct vm_area_struct *vma = find_vma(mm, addr);
64 struct address_space *mapping = vma->vm_file->f_mapping; 71 struct address_space *mapping = vma->vm_file->f_mapping;
@@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
68 struct vm_area_struct *svma; 75 struct vm_area_struct *svma;
69 unsigned long saddr; 76 unsigned long saddr;
70 pte_t *spte = NULL; 77 pte_t *spte = NULL;
78 pte_t *pte;
71 79
72 if (!vma_shareable(vma, addr)) 80 if (!vma_shareable(vma, addr))
73 return; 81 return (pte_t *)pmd_alloc(mm, pud, addr);
74 82
75 mutex_lock(&mapping->i_mmap_mutex); 83 mutex_lock(&mapping->i_mmap_mutex);
76 vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) { 84 vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
@@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
97 put_page(virt_to_page(spte)); 105 put_page(virt_to_page(spte));
98 spin_unlock(&mm->page_table_lock); 106 spin_unlock(&mm->page_table_lock);
99out: 107out:
108 pte = (pte_t *)pmd_alloc(mm, pud, addr);
100 mutex_unlock(&mapping->i_mmap_mutex); 109 mutex_unlock(&mapping->i_mmap_mutex);
110 return pte;
101} 111}
102 112
103/* 113/*
@@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
142 } else { 152 } else {
143 BUG_ON(sz != PMD_SIZE); 153 BUG_ON(sz != PMD_SIZE);
144 if (pud_none(*pud)) 154 if (pud_none(*pud))
145 huge_pmd_share(mm, addr, pud); 155 pte = huge_pmd_share(mm, addr, pud);
146 pte = (pte_t *) pmd_alloc(mm, pud, addr); 156 else
157 pte = (pte_t *)pmd_alloc(mm, pud, addr);
147 } 158 }
148 } 159 }
149 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); 160 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 931930a96160..a718e0d23503 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -919,13 +919,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
919 919
920 /* 920 /*
921 * On success we use clflush, when the CPU supports it to 921 * On success we use clflush, when the CPU supports it to
922 * avoid the wbinvd. If the CPU does not support it, in the 922 * avoid the wbinvd. If the CPU does not support it and in the
923 * error case, and during early boot (for EFI) we fall back 923 * error case we fall back to cpa_flush_all (which uses
924 * to cpa_flush_all (which uses wbinvd): 924 * wbinvd):
925 */ 925 */
926 if (early_boot_irqs_disabled) 926 if (!ret && cpu_has_clflush) {
927 __cpa_flush_all((void *)(long)cache);
928 else if (!ret && cpu_has_clflush) {
929 if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { 927 if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
930 cpa_flush_array(addr, numpages, cache, 928 cpa_flush_array(addr, numpages, cache,
931 cpa.flags, pages); 929 cpa.flags, pages);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 2dc29f51e75a..92660edaa1e7 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -234,7 +234,22 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
234 return status; 234 return status;
235} 235}
236 236
237static int efi_set_rtc_mmss(unsigned long nowtime) 237static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
238 efi_time_cap_t *tc)
239{
240 unsigned long flags;
241 efi_status_t status;
242
243 spin_lock_irqsave(&rtc_lock, flags);
244 efi_call_phys_prelog();
245 status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm),
246 virt_to_phys(tc));
247 efi_call_phys_epilog();
248 spin_unlock_irqrestore(&rtc_lock, flags);
249 return status;
250}
251
252int efi_set_rtc_mmss(unsigned long nowtime)
238{ 253{
239 int real_seconds, real_minutes; 254 int real_seconds, real_minutes;
240 efi_status_t status; 255 efi_status_t status;
@@ -263,7 +278,7 @@ static int efi_set_rtc_mmss(unsigned long nowtime)
263 return 0; 278 return 0;
264} 279}
265 280
266static unsigned long efi_get_time(void) 281unsigned long efi_get_time(void)
267{ 282{
268 efi_status_t status; 283 efi_status_t status;
269 efi_time_t eft; 284 efi_time_t eft;
@@ -606,13 +621,18 @@ static int __init efi_runtime_init(void)
606 } 621 }
607 /* 622 /*
608 * We will only need *early* access to the following 623 * We will only need *early* access to the following
609 * EFI runtime service before set_virtual_address_map 624 * two EFI runtime services before set_virtual_address_map
610 * is invoked. 625 * is invoked.
611 */ 626 */
627 efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
612 efi_phys.set_virtual_address_map = 628 efi_phys.set_virtual_address_map =
613 (efi_set_virtual_address_map_t *) 629 (efi_set_virtual_address_map_t *)
614 runtime->set_virtual_address_map; 630 runtime->set_virtual_address_map;
615 631 /*
632 * Make sure efi_get_time can be called before entering
633 * virtual mode.
634 */
635 efi.get_time = phys_efi_get_time;
616 early_iounmap(runtime, sizeof(efi_runtime_services_t)); 636 early_iounmap(runtime, sizeof(efi_runtime_services_t));
617 637
618 return 0; 638 return 0;
@@ -700,10 +720,12 @@ void __init efi_init(void)
700 efi_enabled = 0; 720 efi_enabled = 0;
701 return; 721 return;
702 } 722 }
723#ifdef CONFIG_X86_32
703 if (efi_native) { 724 if (efi_native) {
704 x86_platform.get_wallclock = efi_get_time; 725 x86_platform.get_wallclock = efi_get_time;
705 x86_platform.set_wallclock = efi_set_rtc_mmss; 726 x86_platform.set_wallclock = efi_set_rtc_mmss;
706 } 727 }
728#endif
707 729
708#if EFI_DEBUG 730#if EFI_DEBUG
709 print_efi_memmap(); 731 print_efi_memmap();
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index b2d534cab25f..88692871823f 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -72,7 +72,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
72 -Wall -Wstrict-prototypes \ 72 -Wall -Wstrict-prototypes \
73 -march=i386 -mregparm=3 \ 73 -march=i386 -mregparm=3 \
74 -include $(srctree)/$(src)/../../boot/code16gcc.h \ 74 -include $(srctree)/$(src)/../../boot/code16gcc.h \
75 -fno-strict-aliasing -fomit-frame-pointer \ 75 -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
76 $(call cc-option, -ffreestanding) \ 76 $(call cc-option, -ffreestanding) \
77 $(call cc-option, -fno-toplevel-reorder,\ 77 $(call cc-option, -fno-toplevel-reorder,\
78 $(call cc-option, -fno-unit-at-a-time)) \ 78 $(call cc-option, -fno-unit-at-a-time)) \
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 29aed7ac2c02..a582bfed95bb 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -60,8 +60,8 @@
6051 common getsockname sys_getsockname 6051 common getsockname sys_getsockname
6152 common getpeername sys_getpeername 6152 common getpeername sys_getpeername
6253 common socketpair sys_socketpair 6253 common socketpair sys_socketpair
6354 common setsockopt sys_setsockopt 6354 64 setsockopt sys_setsockopt
6455 common getsockopt sys_getsockopt 6455 64 getsockopt sys_getsockopt
6556 common clone stub_clone 6556 common clone stub_clone
6657 common fork stub_fork 6657 common fork stub_fork
6758 common vfork stub_vfork 6758 common vfork stub_vfork
@@ -353,3 +353,5 @@
353538 x32 sendmmsg compat_sys_sendmmsg 353538 x32 sendmmsg compat_sys_sendmmsg
354539 x32 process_vm_readv compat_sys_process_vm_readv 354539 x32 process_vm_readv compat_sys_process_vm_readv
355540 x32 process_vm_writev compat_sys_process_vm_writev 355540 x32 process_vm_writev compat_sys_process_vm_writev
356541 x32 setsockopt compat_sys_setsockopt
357542 x32 getsockopt compat_sys_getsockopt
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index bf4bda6d3e9a..9642d4a38602 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -31,7 +31,6 @@
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/gfp.h> 32#include <linux/gfp.h>
33#include <linux/memblock.h> 33#include <linux/memblock.h>
34#include <linux/syscore_ops.h>
35 34
36#include <xen/xen.h> 35#include <xen/xen.h>
37#include <xen/interface/xen.h> 36#include <xen/interface/xen.h>
@@ -1470,130 +1469,38 @@ asmlinkage void __init xen_start_kernel(void)
1470#endif 1469#endif
1471} 1470}
1472 1471
1473#ifdef CONFIG_XEN_PVHVM 1472void __ref xen_hvm_init_shared_info(void)
1474/*
1475 * The pfn containing the shared_info is located somewhere in RAM. This
1476 * will cause trouble if the current kernel is doing a kexec boot into a
1477 * new kernel. The new kernel (and its startup code) can not know where
1478 * the pfn is, so it can not reserve the page. The hypervisor will
1479 * continue to update the pfn, and as a result memory corruption occurs
1480 * in the new kernel.
1481 *
1482 * One way to work around this issue is to allocate a page in the
1483 * xen-platform pci device's BAR memory range. But pci init is done very
1484 * late and the shared_info page is already in use very early to read
1485 * the pvclock. So moving the pfn from RAM to MMIO is racy because some
1486 * code paths on other vcpus could access the pfn during the small
1487 * window when the old pfn is moved to the new pfn. There is even a
1488 * small window where the old pfn is not backed by a mfn, and during that
1489 * time all reads return -1.
1490 *
1491 * Because it is not known upfront where the MMIO region is located it
1492 * can not be used right from the start in xen_hvm_init_shared_info.
1493 *
1494 * To minimise trouble the move of the pfn is done shortly before kexec.
1495 * This does not eliminate the race because all vcpus are still online
1496 * when the syscore_ops will be called. But hopefully there is no work
1497 * pending at this point in time. Also the syscore_op is run last which
1498 * reduces the risk further.
1499 */
1500
1501static struct shared_info *xen_hvm_shared_info;
1502
1503static void xen_hvm_connect_shared_info(unsigned long pfn)
1504{ 1473{
1474 int cpu;
1505 struct xen_add_to_physmap xatp; 1475 struct xen_add_to_physmap xatp;
1476 static struct shared_info *shared_info_page = 0;
1506 1477
1478 if (!shared_info_page)
1479 shared_info_page = (struct shared_info *)
1480 extend_brk(PAGE_SIZE, PAGE_SIZE);
1507 xatp.domid = DOMID_SELF; 1481 xatp.domid = DOMID_SELF;
1508 xatp.idx = 0; 1482 xatp.idx = 0;
1509 xatp.space = XENMAPSPACE_shared_info; 1483 xatp.space = XENMAPSPACE_shared_info;
1510 xatp.gpfn = pfn; 1484 xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
1511 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) 1485 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
1512 BUG(); 1486 BUG();
1513 1487
1514} 1488 HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
1515static void xen_hvm_set_shared_info(struct shared_info *sip)
1516{
1517 int cpu;
1518
1519 HYPERVISOR_shared_info = sip;
1520 1489
1521 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info 1490 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
1522 * page, we use it in the event channel upcall and in some pvclock 1491 * page, we use it in the event channel upcall and in some pvclock
1523 * related functions. We don't need the vcpu_info placement 1492 * related functions. We don't need the vcpu_info placement
1524 * optimizations because we don't use any pv_mmu or pv_irq op on 1493 * optimizations because we don't use any pv_mmu or pv_irq op on
1525 * HVM. 1494 * HVM.
1526 * When xen_hvm_set_shared_info is run at boot time only vcpu 0 is 1495 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
1527 * online but xen_hvm_set_shared_info is run at resume time too and 1496 * online but xen_hvm_init_shared_info is run at resume time too and
1528 * in that case multiple vcpus might be online. */ 1497 * in that case multiple vcpus might be online. */
1529 for_each_online_cpu(cpu) { 1498 for_each_online_cpu(cpu) {
1530 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 1499 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
1531 } 1500 }
1532} 1501}
1533 1502
1534/* Reconnect the shared_info pfn to a mfn */ 1503#ifdef CONFIG_XEN_PVHVM
1535void xen_hvm_resume_shared_info(void)
1536{
1537 xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT);
1538}
1539
1540#ifdef CONFIG_KEXEC
1541static struct shared_info *xen_hvm_shared_info_kexec;
1542static unsigned long xen_hvm_shared_info_pfn_kexec;
1543
1544/* Remember a pfn in MMIO space for kexec reboot */
1545void __devinit xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn)
1546{
1547 xen_hvm_shared_info_kexec = sip;
1548 xen_hvm_shared_info_pfn_kexec = pfn;
1549}
1550
1551static void xen_hvm_syscore_shutdown(void)
1552{
1553 struct xen_memory_reservation reservation = {
1554 .domid = DOMID_SELF,
1555 .nr_extents = 1,
1556 };
1557 unsigned long prev_pfn;
1558 int rc;
1559
1560 if (!xen_hvm_shared_info_kexec)
1561 return;
1562
1563 prev_pfn = __pa(xen_hvm_shared_info) >> PAGE_SHIFT;
1564 set_xen_guest_handle(reservation.extent_start, &prev_pfn);
1565
1566 /* Move pfn to MMIO, disconnects previous pfn from mfn */
1567 xen_hvm_connect_shared_info(xen_hvm_shared_info_pfn_kexec);
1568
1569 /* Update pointers, following hypercall is also a memory barrier */
1570 xen_hvm_set_shared_info(xen_hvm_shared_info_kexec);
1571
1572 /* Allocate new mfn for previous pfn */
1573 do {
1574 rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
1575 if (rc == 0)
1576 msleep(123);
1577 } while (rc == 0);
1578
1579 /* Make sure the previous pfn is really connected to a (new) mfn */
1580 BUG_ON(rc != 1);
1581}
1582
1583static struct syscore_ops xen_hvm_syscore_ops = {
1584 .shutdown = xen_hvm_syscore_shutdown,
1585};
1586#endif
1587
1588/* Use a pfn in RAM, may move to MMIO before kexec. */
1589static void __init xen_hvm_init_shared_info(void)
1590{
1591 /* Remember pointer for resume */
1592 xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE);
1593 xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT);
1594 xen_hvm_set_shared_info(xen_hvm_shared_info);
1595}
1596
1597static void __init init_hvm_pv_info(void) 1504static void __init init_hvm_pv_info(void)
1598{ 1505{
1599 int major, minor; 1506 int major, minor;
@@ -1644,9 +1551,6 @@ static void __init xen_hvm_guest_init(void)
1644 init_hvm_pv_info(); 1551 init_hvm_pv_info();
1645 1552
1646 xen_hvm_init_shared_info(); 1553 xen_hvm_init_shared_info();
1647#ifdef CONFIG_KEXEC
1648 register_syscore_ops(&xen_hvm_syscore_ops);
1649#endif
1650 1554
1651 if (xen_feature(XENFEAT_hvm_callback_vector)) 1555 if (xen_feature(XENFEAT_hvm_callback_vector))
1652 xen_have_vector_callback = 1; 1556 xen_have_vector_callback = 1;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index b2e91d40a4cb..d4b255463253 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -196,9 +196,11 @@ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
196 196
197/* When we populate back during bootup, the amount of pages can vary. The 197/* When we populate back during bootup, the amount of pages can vary. The
198 * max we have is seen is 395979, but that does not mean it can't be more. 198 * max we have is seen is 395979, but that does not mean it can't be more.
199 * But some machines can have 3GB I/O holes even. So lets reserve enough 199 * Some machines can have 3GB I/O holes even. With early_can_reuse_p2m_middle
200 * for 4GB of I/O and E820 holes. */ 200 * it can re-use Xen provided mfn_list array, so we only need to allocate at
201RESERVE_BRK(p2m_populated, PMD_SIZE * 4); 201 * most three P2M top nodes. */
202RESERVE_BRK(p2m_populated, PAGE_SIZE * 3);
203
202static inline unsigned p2m_top_index(unsigned long pfn) 204static inline unsigned p2m_top_index(unsigned long pfn)
203{ 205{
204 BUG_ON(pfn >= MAX_P2M_PFN); 206 BUG_ON(pfn >= MAX_P2M_PFN);
@@ -575,12 +577,99 @@ static bool __init early_alloc_p2m(unsigned long pfn)
575 } 577 }
576 return true; 578 return true;
577} 579}
580
581/*
582 * Skim over the P2M tree looking at pages that are either filled with
583 * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
584 * replace the P2M leaf with a p2m_missing or p2m_identity.
585 * Stick the old page in the new P2M tree location.
586 */
587bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn)
588{
589 unsigned topidx;
590 unsigned mididx;
591 unsigned ident_pfns;
592 unsigned inv_pfns;
593 unsigned long *p2m;
594 unsigned long *mid_mfn_p;
595 unsigned idx;
596 unsigned long pfn;
597
598 /* We only look when this entails a P2M middle layer */
599 if (p2m_index(set_pfn))
600 return false;
601
602 for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
603 topidx = p2m_top_index(pfn);
604
605 if (!p2m_top[topidx])
606 continue;
607
608 if (p2m_top[topidx] == p2m_mid_missing)
609 continue;
610
611 mididx = p2m_mid_index(pfn);
612 p2m = p2m_top[topidx][mididx];
613 if (!p2m)
614 continue;
615
616 if ((p2m == p2m_missing) || (p2m == p2m_identity))
617 continue;
618
619 if ((unsigned long)p2m == INVALID_P2M_ENTRY)
620 continue;
621
622 ident_pfns = 0;
623 inv_pfns = 0;
624 for (idx = 0; idx < P2M_PER_PAGE; idx++) {
625 /* IDENTITY_PFNs are 1:1 */
626 if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
627 ident_pfns++;
628 else if (p2m[idx] == INVALID_P2M_ENTRY)
629 inv_pfns++;
630 else
631 break;
632 }
633 if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
634 goto found;
635 }
636 return false;
637found:
638 /* Found one, replace old with p2m_identity or p2m_missing */
639 p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
640 /* And the other for save/restore.. */
641 mid_mfn_p = p2m_top_mfn_p[topidx];
642 /* NOTE: Even if it is a p2m_identity it should still point to
643 * a page filled with INVALID_P2M_ENTRY entries. */
644 mid_mfn_p[mididx] = virt_to_mfn(p2m_missing);
645
646 /* Reset where we want to stick the old page in. */
647 topidx = p2m_top_index(set_pfn);
648 mididx = p2m_mid_index(set_pfn);
649
650 /* This shouldn't happen */
651 if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
652 early_alloc_p2m(set_pfn);
653
654 if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
655 return false;
656
657 p2m_init(p2m);
658 p2m_top[topidx][mididx] = p2m;
659 mid_mfn_p = p2m_top_mfn_p[topidx];
660 mid_mfn_p[mididx] = virt_to_mfn(p2m);
661
662 return true;
663}
578bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn) 664bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
579{ 665{
580 if (unlikely(!__set_phys_to_machine(pfn, mfn))) { 666 if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
581 if (!early_alloc_p2m(pfn)) 667 if (!early_alloc_p2m(pfn))
582 return false; 668 return false;
583 669
670 if (early_can_reuse_p2m_middle(pfn, mfn))
671 return __set_phys_to_machine(pfn, mfn);
672
584 if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/)) 673 if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
585 return false; 674 return false;
586 675
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index ead85576d54a..d11ca11d14fc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -78,9 +78,16 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
78 memblock_reserve(start, size); 78 memblock_reserve(start, size);
79 79
80 xen_max_p2m_pfn = PFN_DOWN(start + size); 80 xen_max_p2m_pfn = PFN_DOWN(start + size);
81 for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
82 unsigned long mfn = pfn_to_mfn(pfn);
83
84 if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
85 continue;
86 WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
87 pfn, mfn);
81 88
82 for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
83 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); 89 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
90 }
84} 91}
85 92
86static unsigned long __init xen_do_chunk(unsigned long start, 93static unsigned long __init xen_do_chunk(unsigned long start,
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index ae8a00c39de4..45329c8c226e 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
30{ 30{
31#ifdef CONFIG_XEN_PVHVM 31#ifdef CONFIG_XEN_PVHVM
32 int cpu; 32 int cpu;
33 xen_hvm_resume_shared_info(); 33 xen_hvm_init_shared_info();
34 xen_callback_vector(); 34 xen_callback_vector();
35 xen_unplug_emulated_devices(); 35 xen_unplug_emulated_devices();
36 if (xen_feature(XENFEAT_hvm_safe_pvclock)) { 36 if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 1e4329e04e0f..202d4c150154 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -41,7 +41,7 @@ void xen_enable_syscall(void);
41void xen_vcpu_restore(void); 41void xen_vcpu_restore(void);
42 42
43void xen_callback_vector(void); 43void xen_callback_vector(void);
44void xen_hvm_resume_shared_info(void); 44void xen_hvm_init_shared_info(void);
45void xen_unplug_emulated_devices(void); 45void xen_unplug_emulated_devices(void);
46 46
47void __init xen_build_dynamic_phys_to_machine(void); 47void __init xen_build_dynamic_phys_to_machine(void);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2b461b496a78..19cc761cacb2 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -44,6 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
44 struct request_queue *q = bdev_get_queue(bdev); 44 struct request_queue *q = bdev_get_queue(bdev);
45 int type = REQ_WRITE | REQ_DISCARD; 45 int type = REQ_WRITE | REQ_DISCARD;
46 unsigned int max_discard_sectors; 46 unsigned int max_discard_sectors;
47 unsigned int granularity, alignment, mask;
47 struct bio_batch bb; 48 struct bio_batch bb;
48 struct bio *bio; 49 struct bio *bio;
49 int ret = 0; 50 int ret = 0;
@@ -54,18 +55,20 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
54 if (!blk_queue_discard(q)) 55 if (!blk_queue_discard(q))
55 return -EOPNOTSUPP; 56 return -EOPNOTSUPP;
56 57
58 /* Zero-sector (unknown) and one-sector granularities are the same. */
59 granularity = max(q->limits.discard_granularity >> 9, 1U);
60 mask = granularity - 1;
61 alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
62
57 /* 63 /*
58 * Ensure that max_discard_sectors is of the proper 64 * Ensure that max_discard_sectors is of the proper
59 * granularity 65 * granularity, so that requests stay aligned after a split.
60 */ 66 */
61 max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); 67 max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
68 max_discard_sectors = round_down(max_discard_sectors, granularity);
62 if (unlikely(!max_discard_sectors)) { 69 if (unlikely(!max_discard_sectors)) {
63 /* Avoid infinite loop below. Being cautious never hurts. */ 70 /* Avoid infinite loop below. Being cautious never hurts. */
64 return -EOPNOTSUPP; 71 return -EOPNOTSUPP;
65 } else if (q->limits.discard_granularity) {
66 unsigned int disc_sects = q->limits.discard_granularity >> 9;
67
68 max_discard_sectors &= ~(disc_sects - 1);
69 } 72 }
70 73
71 if (flags & BLKDEV_DISCARD_SECURE) { 74 if (flags & BLKDEV_DISCARD_SECURE) {
@@ -79,25 +82,37 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
79 bb.wait = &wait; 82 bb.wait = &wait;
80 83
81 while (nr_sects) { 84 while (nr_sects) {
85 unsigned int req_sects;
86 sector_t end_sect;
87
82 bio = bio_alloc(gfp_mask, 1); 88 bio = bio_alloc(gfp_mask, 1);
83 if (!bio) { 89 if (!bio) {
84 ret = -ENOMEM; 90 ret = -ENOMEM;
85 break; 91 break;
86 } 92 }
87 93
94 req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
95
96 /*
97 * If splitting a request, and the next starting sector would be
98 * misaligned, stop the discard at the previous aligned sector.
99 */
100 end_sect = sector + req_sects;
101 if (req_sects < nr_sects && (end_sect & mask) != alignment) {
102 end_sect =
103 round_down(end_sect - alignment, granularity)
104 + alignment;
105 req_sects = end_sect - sector;
106 }
107
88 bio->bi_sector = sector; 108 bio->bi_sector = sector;
89 bio->bi_end_io = bio_batch_end_io; 109 bio->bi_end_io = bio_batch_end_io;
90 bio->bi_bdev = bdev; 110 bio->bi_bdev = bdev;
91 bio->bi_private = &bb; 111 bio->bi_private = &bb;
92 112
93 if (nr_sects > max_discard_sectors) { 113 bio->bi_size = req_sects << 9;
94 bio->bi_size = max_discard_sectors << 9; 114 nr_sects -= req_sects;
95 nr_sects -= max_discard_sectors; 115 sector = end_sect;
96 sector += max_discard_sectors;
97 } else {
98 bio->bi_size = nr_sects << 9;
99 nr_sects = 0;
100 }
101 116
102 atomic_inc(&bb.done); 117 atomic_inc(&bb.done);
103 submit_bio(type, bio); 118 submit_bio(type, bio);
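To make the discard-splitting arithmetic in the blk-lib.c hunk above concrete, here is a minimal userspace sketch with made-up numbers (granularity of 8 sectors, alignment of 2, a 24-sector chunk limit); it only reproduces the math, none of it is block-layer code:

#include <stdio.h>

static unsigned long long round_down_to(unsigned long long v, unsigned int g)
{
	return v - (v % g);
}

int main(void)
{
	unsigned int granularity = 8, alignment = 2, mask = granularity - 1;
	unsigned long long sector = 0, nr_sects = 100, max_discard = 24;
	unsigned long long req_sects = nr_sects < max_discard ? nr_sects : max_discard;
	unsigned long long end_sect = sector + req_sects;

	if (req_sects < nr_sects && (end_sect & mask) != alignment) {
		/* Trim so the next chunk starts on a granularity boundary + alignment. */
		end_sect = round_down_to(end_sect - alignment, granularity) + alignment;
		req_sects = end_sect - sector;
	}
	/* Prints req_sects=18 next_sector=18; 18 % 8 == 2 == alignment, so the
	 * following chunk starts properly aligned. */
	printf("req_sects=%llu next_sector=%llu\n", req_sects, end_sect);
	return 0;
}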
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 160035f54882..e76279e41162 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
110 return 0; 110 return 0;
111} 111}
112 112
113static void
114__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
115 struct scatterlist *sglist, struct bio_vec **bvprv,
116 struct scatterlist **sg, int *nsegs, int *cluster)
117{
118
119 int nbytes = bvec->bv_len;
120
121 if (*bvprv && *cluster) {
122 if ((*sg)->length + nbytes > queue_max_segment_size(q))
123 goto new_segment;
124
125 if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
126 goto new_segment;
127 if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
128 goto new_segment;
129
130 (*sg)->length += nbytes;
131 } else {
132new_segment:
133 if (!*sg)
134 *sg = sglist;
135 else {
136 /*
137 * If the driver previously mapped a shorter
138 * list, we could see a termination bit
139 * prematurely unless it fully inits the sg
140 * table on each mapping. We KNOW that there
141 * must be more entries here or the driver
142 * would be buggy, so force clear the
143 * termination bit to avoid doing a full
144 * sg_init_table() in drivers for each command.
145 */
146 (*sg)->page_link &= ~0x02;
147 *sg = sg_next(*sg);
148 }
149
150 sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
151 (*nsegs)++;
152 }
153 *bvprv = bvec;
154}
155
113/* 156/*
114 * map a request to scatterlist, return number of sg entries setup. Caller 157 * map a request to scatterlist, return number of sg entries setup. Caller
115 * must make sure sg can hold rq->nr_phys_segments entries 158 * must make sure sg can hold rq->nr_phys_segments entries
@@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
131 bvprv = NULL; 174 bvprv = NULL;
132 sg = NULL; 175 sg = NULL;
133 rq_for_each_segment(bvec, rq, iter) { 176 rq_for_each_segment(bvec, rq, iter) {
134 int nbytes = bvec->bv_len; 177 __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
135 178 &nsegs, &cluster);
136 if (bvprv && cluster) {
137 if (sg->length + nbytes > queue_max_segment_size(q))
138 goto new_segment;
139
140 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
141 goto new_segment;
142 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
143 goto new_segment;
144
145 sg->length += nbytes;
146 } else {
147new_segment:
148 if (!sg)
149 sg = sglist;
150 else {
151 /*
152 * If the driver previously mapped a shorter
153 * list, we could see a termination bit
154 * prematurely unless it fully inits the sg
155 * table on each mapping. We KNOW that there
156 * must be more entries here or the driver
157 * would be buggy, so force clear the
158 * termination bit to avoid doing a full
159 * sg_init_table() in drivers for each command.
160 */
161 sg->page_link &= ~0x02;
162 sg = sg_next(sg);
163 }
164
165 sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
166 nsegs++;
167 }
168 bvprv = bvec;
169 } /* segments in rq */ 179 } /* segments in rq */
170 180
171 181
@@ -199,6 +209,43 @@ new_segment:
199} 209}
200EXPORT_SYMBOL(blk_rq_map_sg); 210EXPORT_SYMBOL(blk_rq_map_sg);
201 211
212/**
213 * blk_bio_map_sg - map a bio to a scatterlist
214 * @q: request_queue in question
215 * @bio: bio being mapped
216 * @sglist: scatterlist being mapped
217 *
218 * Note:
219 * Caller must make sure sg can hold bio->bi_phys_segments entries
220 *
221 * Will return the number of sg entries setup
222 */
223int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
224 struct scatterlist *sglist)
225{
226 struct bio_vec *bvec, *bvprv;
227 struct scatterlist *sg;
228 int nsegs, cluster;
229 unsigned long i;
230
231 nsegs = 0;
232 cluster = blk_queue_cluster(q);
233
234 bvprv = NULL;
235 sg = NULL;
236 bio_for_each_segment(bvec, bio, i) {
237 __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
238 &nsegs, &cluster);
239 } /* segments in bio */
240
241 if (sg)
242 sg_mark_end(sg);
243
244 BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
245 return nsegs;
246}
247EXPORT_SYMBOL(blk_bio_map_sg);
248
202static inline int ll_new_hw_segment(struct request_queue *q, 249static inline int ll_new_hw_segment(struct request_queue *q,
203 struct request *req, 250 struct request *req,
204 struct bio *bio) 251 struct bio *bio)
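The newly exported blk_bio_map_sg() maps a single bio, rather than a whole request, onto a scatterlist. A hedged usage sketch follows; my_dev, MY_DRIVER_MAX_SEGS and the surrounding driver glue are invented for illustration, and only blk_bio_map_sg(), sg_init_table() and dma_map_sg() are real kernel interfaces:

/* Sketch of a driver mapping one bio for DMA; everything prefixed "my_" is
 * hypothetical. blk_bio_map_sg() returns the number of segments used and
 * marks the end of the list itself. The DMA direction is chosen arbitrarily
 * here; a real driver would derive it from the bio's data direction. */
static int my_driver_map_bio(struct my_dev *dev, struct bio *bio)
{
	struct scatterlist sgl[MY_DRIVER_MAX_SEGS];
	int nsegs;

	sg_init_table(sgl, MY_DRIVER_MAX_SEGS);
	nsegs = blk_bio_map_sg(dev->queue, bio, sgl);
	return dma_map_sg(dev->dma_dev, sgl, nsegs, DMA_TO_DEVICE);
}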
diff --git a/block/genhd.c b/block/genhd.c
index cac7366957c3..d839723303c8 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -835,7 +835,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
835 835
836static void *show_partition_start(struct seq_file *seqf, loff_t *pos) 836static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
837{ 837{
838 static void *p; 838 void *p;
839 839
840 p = disk_seqf_start(seqf, pos); 840 p = disk_seqf_start(seqf, pos);
841 if (!IS_ERR_OR_NULL(p) && !*pos) 841 if (!IS_ERR_OR_NULL(p) && !*pos)
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index ea4c6d52605a..29e51bc01383 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -387,6 +387,7 @@ acpi_get_table_with_size(char *signature,
387 387
388 return (AE_NOT_FOUND); 388 return (AE_NOT_FOUND);
389} 389}
390ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
390 391
391acpi_status 392acpi_status
392acpi_get_table(char *signature, 393acpi_get_table(char *signature,
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2be8ef1d3093..27cecd313e75 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -115,7 +115,7 @@ config SATA_SIL24
115 If unsure, say N. 115 If unsure, say N.
116 116
117config ATA_SFF 117config ATA_SFF
118 bool "ATA SFF support" 118 bool "ATA SFF support (for legacy IDE and PATA)"
119 default y 119 default y
120 help 120 help
121 This option adds support for ATA controllers with SFF 121 This option adds support for ATA controllers with SFF
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 062e6a1a248f..50d5dea0ff59 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -256,6 +256,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
256 { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */ 256 { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
257 { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ 257 { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
258 { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */ 258 { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
259 { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
260 { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
261 { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
262 { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
263 { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
264 { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
265 { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
266 { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
259 267
260 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 268 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
261 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 269 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index c2594ddf25b0..57eb1c212a4c 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -320,6 +320,7 @@ extern struct device_attribute *ahci_sdev_attrs[];
320extern struct ata_port_operations ahci_ops; 320extern struct ata_port_operations ahci_ops;
321extern struct ata_port_operations ahci_pmp_retry_srst_ops; 321extern struct ata_port_operations ahci_pmp_retry_srst_ops;
322 322
323unsigned int ahci_dev_classify(struct ata_port *ap);
323void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, 324void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
324 u32 opts); 325 u32 opts);
325void ahci_save_initial_config(struct device *dev, 326void ahci_save_initial_config(struct device *dev,
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3c809bfbccf5..ef773e12af79 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -329,6 +329,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
329 { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 329 { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
330 /* SATA Controller IDE (Lynx Point) */ 330 /* SATA Controller IDE (Lynx Point) */
331 { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 331 { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
332 /* SATA Controller IDE (Lynx Point-LP) */
333 { 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
334 /* SATA Controller IDE (Lynx Point-LP) */
335 { 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
336 /* SATA Controller IDE (Lynx Point-LP) */
337 { 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
338 /* SATA Controller IDE (Lynx Point-LP) */
339 { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
332 /* SATA Controller IDE (DH89xxCC) */ 340 /* SATA Controller IDE (DH89xxCC) */
333 { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 341 { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
334 { } /* terminate list */ 342 { } /* terminate list */
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index f9eaa82311a9..555c07afa05b 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1139,7 +1139,7 @@ static void ahci_dev_config(struct ata_device *dev)
1139 } 1139 }
1140} 1140}
1141 1141
1142static unsigned int ahci_dev_classify(struct ata_port *ap) 1142unsigned int ahci_dev_classify(struct ata_port *ap)
1143{ 1143{
1144 void __iomem *port_mmio = ahci_port_base(ap); 1144 void __iomem *port_mmio = ahci_port_base(ap);
1145 struct ata_taskfile tf; 1145 struct ata_taskfile tf;
@@ -1153,6 +1153,7 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
1153 1153
1154 return ata_dev_classify(&tf); 1154 return ata_dev_classify(&tf);
1155} 1155}
1156EXPORT_SYMBOL_GPL(ahci_dev_classify);
1156 1157
1157void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, 1158void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1158 u32 opts) 1159 u32 opts)
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 902b5a457170..fd9ecf74e631 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -60,17 +60,7 @@ acpi_handle ata_ap_acpi_handle(struct ata_port *ap)
60 if (ap->flags & ATA_FLAG_ACPI_SATA) 60 if (ap->flags & ATA_FLAG_ACPI_SATA)
61 return NULL; 61 return NULL;
62 62
63 /* 63 return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), ap->port_no);
64 * If acpi bind operation has already happened, we can get the handle
65 * for the port by checking the corresponding scsi_host device's
66 * firmware node, otherwise we will need to find out the handle from
67 * its parent's acpi node.
68 */
69 if (ap->scsi_host)
70 return DEVICE_ACPI_HANDLE(&ap->scsi_host->shost_gendev);
71 else
72 return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev),
73 ap->port_no);
74} 64}
75EXPORT_SYMBOL(ata_ap_acpi_handle); 65EXPORT_SYMBOL(ata_ap_acpi_handle);
76 66
@@ -1101,6 +1091,9 @@ static int ata_acpi_bind_host(struct ata_port *ap, acpi_handle *handle)
1101 if (!*handle) 1091 if (!*handle)
1102 return -ENODEV; 1092 return -ENODEV;
1103 1093
1094 if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
1095 ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
1096
1104 return 0; 1097 return 0;
1105} 1098}
1106 1099
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index fadd5866d40f..8e1039c8e159 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4062,7 +4062,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4062 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4062 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4063 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4063 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4064 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4064 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4065 { "2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4065 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4066 /* Odd clown on sil3726/4726 PMPs */ 4066 /* Odd clown on sil3726/4726 PMPs */
4067 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4067 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4068 4068
@@ -4128,6 +4128,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4128 4128
4129 /* Devices that do not need bridging limits applied */ 4129 /* Devices that do not need bridging limits applied */
4130 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4130 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4131 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
4131 4132
4132 /* Devices which aren't very happy with higher link speeds */ 4133 /* Devices which aren't very happy with higher link speeds */
4133 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4134 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 361c75cea57b..24e51056ac26 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -20,6 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <scsi/scsi_host.h> 21#include <scsi/scsi_host.h>
22#include <linux/libata.h> 22#include <linux/libata.h>
23#include <linux/dmi.h>
23 24
24#define DRV_NAME "pata_atiixp" 25#define DRV_NAME "pata_atiixp"
25#define DRV_VERSION "0.4.6" 26#define DRV_VERSION "0.4.6"
@@ -33,11 +34,26 @@ enum {
33 ATIIXP_IDE_UDMA_MODE = 0x56 34 ATIIXP_IDE_UDMA_MODE = 0x56
34}; 35};
35 36
37static const struct dmi_system_id attixp_cable_override_dmi_table[] = {
38 {
39 /* Board has onboard PATA<->SATA converters */
40 .ident = "MSI E350DM-E33",
41 .matches = {
42 DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
43 DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"),
44 },
45 },
46 { }
47};
48
36static int atiixp_cable_detect(struct ata_port *ap) 49static int atiixp_cable_detect(struct ata_port *ap)
37{ 50{
38 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 51 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
39 u8 udma; 52 u8 udma;
40 53
54 if (dmi_check_system(attixp_cable_override_dmi_table))
55 return ATA_CBL_PATA40_SHORT;
56
41 /* Hack from drivers/ide/pci. Really we want to know how to do the 57 /* Hack from drivers/ide/pci. Really we want to know how to do the
42 raw detection not play follow the bios mode guess */ 58 raw detection not play follow the bios mode guess */
43 pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma); 59 pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index cdd01c52c629..5e6e00bc1652 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1912,8 +1912,8 @@ int __dev_printk(const char *level, const struct device *dev,
1912 "DEVICE=+%s:%s", subsys, dev_name(dev)); 1912 "DEVICE=+%s:%s", subsys, dev_name(dev));
1913 } 1913 }
1914skip: 1914skip:
1915 if (level[3]) 1915 if (level[2])
1916 level_extra = &level[3]; /* skip past "<L>" */ 1916 level_extra = &level[2]; /* skip past KERN_SOH "L" */
1917 1917
1918 return printk_emit(0, level[1] - '0', 1918 return printk_emit(0, level[1] - '0',
1919 dictlen ? dict : NULL, dictlen, 1919 dictlen ? dict : NULL, dictlen,
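The level[3] to level[2] change in __dev_printk above tracks the switch of printk level prefixes from the three-character "<7>" form to the two-byte KERN_SOH encoding, so any caller-supplied text now begins at index 2. A tiny illustration in plain C (the trailing string is just an example):

/* KERN_DEBUG is KERN_SOH "7", i.e. the bytes { 0x01, '7' }; whatever the
 * caller appended starts at level[2]. */
const char *level = "\001" "7" "rest of the format";
const char *level_extra = level[2] ? &level[2] : "";	/* "rest of the format" */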
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index acda773b3720..38aa6dda6b81 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -763,16 +763,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
763 { 763 {
764 case CMD_TARGET_STATUS: 764 case CMD_TARGET_STATUS:
765 /* Pass it up to the upper layers... */ 765 /* Pass it up to the upper layers... */
766 if( ei->ScsiStatus) 766 if (!ei->ScsiStatus) {
767 {
768#if 0
769 printk(KERN_WARNING "cciss: cmd %p "
770 "has SCSI Status = %x\n",
771 c, ei->ScsiStatus);
772#endif
773 cmd->result |= (ei->ScsiStatus << 1);
774 }
775 else { /* scsi status is zero??? How??? */
776 767
777 /* Ordinarily, this case should never happen, but there is a bug 768 /* Ordinarily, this case should never happen, but there is a bug
778 in some released firmware revisions that allows it to happen 769 in some released firmware revisions that allows it to happen
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index ba91b408abad..d84566496746 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -889,6 +889,7 @@ struct bm_aio_ctx {
889 unsigned int done; 889 unsigned int done;
890 unsigned flags; 890 unsigned flags;
891#define BM_AIO_COPY_PAGES 1 891#define BM_AIO_COPY_PAGES 1
892#define BM_WRITE_ALL_PAGES 2
892 int error; 893 int error;
893 struct kref kref; 894 struct kref kref;
894}; 895};
@@ -1059,7 +1060,8 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
1059 if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) 1060 if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
1060 break; 1061 break;
1061 if (rw & WRITE) { 1062 if (rw & WRITE) {
1062 if (bm_test_page_unchanged(b->bm_pages[i])) { 1063 if (!(flags & BM_WRITE_ALL_PAGES) &&
1064 bm_test_page_unchanged(b->bm_pages[i])) {
1063 dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i); 1065 dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
1064 continue; 1066 continue;
1065 } 1067 }
@@ -1141,6 +1143,17 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
1141} 1143}
1142 1144
1143/** 1145/**
1146 * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
1147 * @mdev: DRBD device.
1148 *
1149 * Will write all pages.
1150 */
1151int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local)
1152{
1153 return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0);
1154}
1155
1156/**
1144 * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed. 1157 * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
1145 * @mdev: DRBD device. 1158 * @mdev: DRBD device.
1146 * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages 1159 * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index b2ca143d0053..b953cc7c9c00 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1469,6 +1469,7 @@ extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
1469extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local); 1469extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
1470extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); 1470extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
1471extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); 1471extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
1472extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local);
1472extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local); 1473extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
1473extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, 1474extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
1474 unsigned long al_enr); 1475 unsigned long al_enr);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index dbe6135a2abe..f93a0320e952 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -79,6 +79,7 @@ static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79static void md_sync_timer_fn(unsigned long data); 79static void md_sync_timer_fn(unsigned long data);
80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused); 80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused); 81static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82static void _tl_clear(struct drbd_conf *mdev);
82 83
83MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " 84MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84 "Lars Ellenberg <lars@linbit.com>"); 85 "Lars Ellenberg <lars@linbit.com>");
@@ -432,19 +433,10 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
432 433
433 /* Actions operating on the disk state, also want to work on 434 /* Actions operating on the disk state, also want to work on
434 requests that got barrier acked. */ 435 requests that got barrier acked. */
435 switch (what) {
436 case fail_frozen_disk_io:
437 case restart_frozen_disk_io:
438 list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
439 req = list_entry(le, struct drbd_request, tl_requests);
440 _req_mod(req, what);
441 }
442 436
443 case connection_lost_while_pending: 437 list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
444 case resend: 438 req = list_entry(le, struct drbd_request, tl_requests);
445 break; 439 _req_mod(req, what);
446 default:
447 dev_err(DEV, "what = %d in _tl_restart()\n", what);
448 } 440 }
449} 441}
450 442
@@ -459,11 +451,16 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
459 */ 451 */
460void tl_clear(struct drbd_conf *mdev) 452void tl_clear(struct drbd_conf *mdev)
461{ 453{
454 spin_lock_irq(&mdev->req_lock);
455 _tl_clear(mdev);
456 spin_unlock_irq(&mdev->req_lock);
457}
458
459static void _tl_clear(struct drbd_conf *mdev)
460{
462 struct list_head *le, *tle; 461 struct list_head *le, *tle;
463 struct drbd_request *r; 462 struct drbd_request *r;
464 463
465 spin_lock_irq(&mdev->req_lock);
466
467 _tl_restart(mdev, connection_lost_while_pending); 464 _tl_restart(mdev, connection_lost_while_pending);
468 465
469 /* we expect this list to be empty. */ 466 /* we expect this list to be empty. */
@@ -482,7 +479,6 @@ void tl_clear(struct drbd_conf *mdev)
482 479
483 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *)); 480 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
484 481
485 spin_unlock_irq(&mdev->req_lock);
486} 482}
487 483
488void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) 484void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
@@ -1476,12 +1472,12 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1476 if (ns.susp_fen) { 1472 if (ns.susp_fen) {
1477 /* case1: The outdate peer handler is successful: */ 1473 /* case1: The outdate peer handler is successful: */
1478 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) { 1474 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
1479 tl_clear(mdev);
1480 if (test_bit(NEW_CUR_UUID, &mdev->flags)) { 1475 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1481 drbd_uuid_new_current(mdev); 1476 drbd_uuid_new_current(mdev);
1482 clear_bit(NEW_CUR_UUID, &mdev->flags); 1477 clear_bit(NEW_CUR_UUID, &mdev->flags);
1483 } 1478 }
1484 spin_lock_irq(&mdev->req_lock); 1479 spin_lock_irq(&mdev->req_lock);
1480 _tl_clear(mdev);
1485 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL); 1481 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1486 spin_unlock_irq(&mdev->req_lock); 1482 spin_unlock_irq(&mdev->req_lock);
1487 } 1483 }
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index fb9dce8daa24..edb490aad8b4 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -674,8 +674,8 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
674 la_size_changed && md_moved ? "size changed and md moved" : 674 la_size_changed && md_moved ? "size changed and md moved" :
675 la_size_changed ? "size changed" : "md moved"); 675 la_size_changed ? "size changed" : "md moved");
676 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ 676 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
677 err = drbd_bitmap_io(mdev, &drbd_bm_write, 677 err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
678 "size changed", BM_LOCKED_MASK); 678 "size changed", BM_LOCKED_MASK);
679 if (err) { 679 if (err) {
680 rv = dev_size_error; 680 rv = dev_size_error;
681 goto out; 681 goto out;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 910335c30927..01b2ac641c7b 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -695,6 +695,12 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
695 break; 695 break;
696 696
697 case resend: 697 case resend:
698 /* Simply complete (local only) READs. */
699 if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
700 _req_may_be_done(req, m);
701 break;
702 }
703
698 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK 704 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
699 before the connection loss (B&C only); only P_BARRIER_ACK was missing. 705 before the connection loss (B&C only); only P_BARRIER_ACK was missing.
700 Throwing them out of the TL here by pretending we got a BARRIER_ACK 706 Throwing them out of the TL here by pretending we got a BARRIER_ACK
@@ -834,7 +840,15 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
834 req->private_bio = NULL; 840 req->private_bio = NULL;
835 } 841 }
836 if (rw == WRITE) { 842 if (rw == WRITE) {
837 remote = 1; 843 /* Need to replicate writes. Unless it is an empty flush,
844 * which is better mapped to a DRBD P_BARRIER packet,
845 * also for drbd wire protocol compatibility reasons. */
846 if (unlikely(size == 0)) {
847 /* The only size==0 bios we expect are empty flushes. */
848 D_ASSERT(bio->bi_rw & REQ_FLUSH);
849 remote = 0;
850 } else
851 remote = 1;
838 } else { 852 } else {
839 /* READ || READA */ 853 /* READ || READA */
840 if (local) { 854 if (local) {
@@ -870,8 +884,11 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
870 * extent. This waits for any resync activity in the corresponding 884 * extent. This waits for any resync activity in the corresponding
871 * resync extent to finish, and, if necessary, pulls in the target 885 * resync extent to finish, and, if necessary, pulls in the target
872 * extent into the activity log, which involves further disk io because 886 * extent into the activity log, which involves further disk io because
873 * of transactional on-disk meta data updates. */ 887 * of transactional on-disk meta data updates.
874 if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) { 888 * Empty flushes don't need to go into the activity log, they can only
889 * flush data for pending writes which are already in there. */
890 if (rw == WRITE && local && size
891 && !test_bit(AL_SUSPENDED, &mdev->flags)) {
875 req->rq_state |= RQ_IN_ACT_LOG; 892 req->rq_state |= RQ_IN_ACT_LOG;
876 drbd_al_begin_io(mdev, sector); 893 drbd_al_begin_io(mdev, sector);
877 } 894 }
@@ -994,7 +1011,10 @@ allocate_barrier:
994 if (rw == WRITE && _req_conflicts(req)) 1011 if (rw == WRITE && _req_conflicts(req))
995 goto fail_conflicting; 1012 goto fail_conflicting;
996 1013
997 list_add_tail(&req->tl_requests, &mdev->newest_tle->requests); 1014 /* no point in adding empty flushes to the transfer log,
1015 * they are mapped to drbd barriers already. */
1016 if (likely(size!=0))
1017 list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
998 1018
999 /* NOTE remote first: to get the concurrent write detection right, 1019 /* NOTE remote first: to get the concurrent write detection right,
1000 * we must register the request before start of local IO. */ 1020 * we must register the request before start of local IO. */
@@ -1014,6 +1034,14 @@ allocate_barrier:
1014 mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) 1034 mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
1015 maybe_pull_ahead(mdev); 1035 maybe_pull_ahead(mdev);
1016 1036
1037 /* If this was a flush, queue a drbd barrier/start a new epoch.
1038 * Unless the current epoch was empty anyways, or we are not currently
1039 * replicating, in which case there is no point. */
1040 if (unlikely(bio->bi_rw & REQ_FLUSH)
1041 && mdev->newest_tle->n_writes
1042 && drbd_should_do_remote(mdev->state))
1043 queue_barrier(mdev);
1044
1017 spin_unlock_irq(&mdev->req_lock); 1045 spin_unlock_irq(&mdev->req_lock);
1018 kfree(b); /* if someone else has beaten us to it... */ 1046 kfree(b); /* if someone else has beaten us to it... */
1019 1047
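The drbd_req.c changes above special-case "empty flushes": bios that carry REQ_FLUSH but no payload, which DRBD now maps to P_BARRIER packets instead of replicating them as writes. For reference, such a bio is roughly what a submitter builds along the lines of blkdev_issue_flush() (a sketch, not DRBD code; bdev stands for whichever block device the caller targets, and error handling and completion setup are omitted):

/* An "empty flush" as seen from the submitting side: no data, only the
 * flush semantics, so bi_size stays 0 and bi_rw carries REQ_FLUSH. */
struct bio *bio = bio_alloc(GFP_NOIO, 0);
bio->bi_bdev = bdev;
submit_bio(WRITE_FLUSH, bio);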
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 10308cd8a7ed..11f36e502136 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -79,6 +79,7 @@ static struct usb_device_id ath3k_table[] = {
79 { USB_DEVICE(0x13d3, 0x3362) }, 79 { USB_DEVICE(0x13d3, 0x3362) },
80 { USB_DEVICE(0x0CF3, 0xE004) }, 80 { USB_DEVICE(0x0CF3, 0xE004) },
81 { USB_DEVICE(0x0930, 0x0219) }, 81 { USB_DEVICE(0x0930, 0x0219) },
82 { USB_DEVICE(0x0489, 0xe057) },
82 83
83 /* Atheros AR5BBU12 with sflash firmware */ 84 /* Atheros AR5BBU12 with sflash firmware */
84 { USB_DEVICE(0x0489, 0xE02C) }, 85 { USB_DEVICE(0x0489, 0xE02C) },
@@ -104,6 +105,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
104 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 105 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
105 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 106 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
106 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 107 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
108 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
107 109
108 /* Atheros AR5BBU22 with sflash firmware */ 110 /* Atheros AR5BBU22 with sflash firmware */
109 { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, 111 { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index e27221411036..cef3bac1a543 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -98,6 +98,7 @@ static struct usb_device_id btusb_table[] = {
98 { USB_DEVICE(0x0a5c, 0x21e6) }, 98 { USB_DEVICE(0x0a5c, 0x21e6) },
99 { USB_DEVICE(0x0a5c, 0x21e8) }, 99 { USB_DEVICE(0x0a5c, 0x21e8) },
100 { USB_DEVICE(0x0a5c, 0x21f3) }, 100 { USB_DEVICE(0x0a5c, 0x21f3) },
101 { USB_DEVICE(0x0a5c, 0x21f4) },
101 { USB_DEVICE(0x413c, 0x8197) }, 102 { USB_DEVICE(0x413c, 0x8197) },
102 103
103 /* Foxconn - Hon Hai */ 104 /* Foxconn - Hon Hai */
@@ -133,6 +134,7 @@ static struct usb_device_id blacklist_table[] = {
133 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 134 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
134 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 135 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
135 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 136 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
137 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
136 138
137 /* Atheros AR5BBU12 with sflash firmware */ 139 /* Atheros AR5BBU12 with sflash firmware */
138 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, 140 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 6f007b6c240d..6ec0fff79bc2 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -64,6 +64,7 @@
64#define I830_PTE_SYSTEM_CACHED 0x00000006 64#define I830_PTE_SYSTEM_CACHED 0x00000006
65/* GT PTE cache control fields */ 65/* GT PTE cache control fields */
66#define GEN6_PTE_UNCACHED 0x00000002 66#define GEN6_PTE_UNCACHED 0x00000002
67#define HSW_PTE_UNCACHED 0x00000000
67#define GEN6_PTE_LLC 0x00000004 68#define GEN6_PTE_LLC 0x00000004
68#define GEN6_PTE_LLC_MLC 0x00000006 69#define GEN6_PTE_LLC_MLC 0x00000006
69#define GEN6_PTE_GFDT 0x00000008 70#define GEN6_PTE_GFDT 0x00000008
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 08fc5cbb13cd..58e32f7c3229 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1156,6 +1156,30 @@ static bool gen6_check_flags(unsigned int flags)
1156 return true; 1156 return true;
1157} 1157}
1158 1158
1159static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
1160 unsigned int flags)
1161{
1162 unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
1163 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1164 u32 pte_flags;
1165
1166 if (type_mask == AGP_USER_MEMORY)
1167 pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
1168 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
1169 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
1170 if (gfdt)
1171 pte_flags |= GEN6_PTE_GFDT;
1172 } else { /* set 'normal'/'cached' to LLC by default */
1173 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
1174 if (gfdt)
1175 pte_flags |= GEN6_PTE_GFDT;
1176 }
1177
1178 /* gen6 has bit11-4 for physical addr bit39-32 */
1179 addr |= (addr >> 28) & 0xff0;
1180 writel(addr | pte_flags, intel_private.gtt + entry);
1181}
1182
1159static void gen6_write_entry(dma_addr_t addr, unsigned int entry, 1183static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
1160 unsigned int flags) 1184 unsigned int flags)
1161{ 1185{
@@ -1382,6 +1406,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
1382 .check_flags = gen6_check_flags, 1406 .check_flags = gen6_check_flags,
1383 .chipset_flush = i9xx_chipset_flush, 1407 .chipset_flush = i9xx_chipset_flush,
1384}; 1408};
1409static const struct intel_gtt_driver haswell_gtt_driver = {
1410 .gen = 6,
1411 .setup = i9xx_setup,
1412 .cleanup = gen6_cleanup,
1413 .write_entry = haswell_write_entry,
1414 .dma_mask_size = 40,
1415 .check_flags = gen6_check_flags,
1416 .chipset_flush = i9xx_chipset_flush,
1417};
1385static const struct intel_gtt_driver valleyview_gtt_driver = { 1418static const struct intel_gtt_driver valleyview_gtt_driver = {
1386 .gen = 7, 1419 .gen = 7,
1387 .setup = i9xx_setup, 1420 .setup = i9xx_setup,
@@ -1499,77 +1532,77 @@ static const struct intel_gtt_driver_description {
1499 { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG, 1532 { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
1500 "ValleyView", &valleyview_gtt_driver }, 1533 "ValleyView", &valleyview_gtt_driver },
1501 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG, 1534 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
1502 "Haswell", &sandybridge_gtt_driver }, 1535 "Haswell", &haswell_gtt_driver },
1503 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG, 1536 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
1504 "Haswell", &sandybridge_gtt_driver }, 1537 "Haswell", &haswell_gtt_driver },
1505 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG, 1538 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
1506 "Haswell", &sandybridge_gtt_driver }, 1539 "Haswell", &haswell_gtt_driver },
1507 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG, 1540 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
1508 "Haswell", &sandybridge_gtt_driver }, 1541 "Haswell", &haswell_gtt_driver },
1509 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG, 1542 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
1510 "Haswell", &sandybridge_gtt_driver }, 1543 "Haswell", &haswell_gtt_driver },
1511 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG, 1544 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
1512 "Haswell", &sandybridge_gtt_driver }, 1545 "Haswell", &haswell_gtt_driver },
1513 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG, 1546 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
1514 "Haswell", &sandybridge_gtt_driver }, 1547 "Haswell", &haswell_gtt_driver },
1515 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG, 1548 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
1516 "Haswell", &sandybridge_gtt_driver }, 1549 "Haswell", &haswell_gtt_driver },
1517 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG, 1550 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
1518 "Haswell", &sandybridge_gtt_driver }, 1551 "Haswell", &haswell_gtt_driver },
1519 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG, 1552 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
1520 "Haswell", &sandybridge_gtt_driver }, 1553 "Haswell", &haswell_gtt_driver },
1521 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG, 1554 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
1522 "Haswell", &sandybridge_gtt_driver }, 1555 "Haswell", &haswell_gtt_driver },
1523 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG, 1556 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
1524 "Haswell", &sandybridge_gtt_driver }, 1557 "Haswell", &haswell_gtt_driver },
1525 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG, 1558 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
1526 "Haswell", &sandybridge_gtt_driver }, 1559 "Haswell", &haswell_gtt_driver },
1527 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG, 1560 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
1528 "Haswell", &sandybridge_gtt_driver }, 1561 "Haswell", &haswell_gtt_driver },
1529 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG, 1562 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
1530 "Haswell", &sandybridge_gtt_driver }, 1563 "Haswell", &haswell_gtt_driver },
1531 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG, 1564 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
1532 "Haswell", &sandybridge_gtt_driver }, 1565 "Haswell", &haswell_gtt_driver },
1533 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG, 1566 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
1534 "Haswell", &sandybridge_gtt_driver }, 1567 "Haswell", &haswell_gtt_driver },
1535 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG, 1568 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
1536 "Haswell", &sandybridge_gtt_driver }, 1569 "Haswell", &haswell_gtt_driver },
1537 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG, 1570 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
1538 "Haswell", &sandybridge_gtt_driver }, 1571 "Haswell", &haswell_gtt_driver },
1539 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG, 1572 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
1540 "Haswell", &sandybridge_gtt_driver }, 1573 "Haswell", &haswell_gtt_driver },
1541 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG, 1574 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
1542 "Haswell", &sandybridge_gtt_driver }, 1575 "Haswell", &haswell_gtt_driver },
1543 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG, 1576 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
1544 "Haswell", &sandybridge_gtt_driver }, 1577 "Haswell", &haswell_gtt_driver },
1545 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG, 1578 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
1546 "Haswell", &sandybridge_gtt_driver }, 1579 "Haswell", &haswell_gtt_driver },
1547 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG, 1580 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
1548 "Haswell", &sandybridge_gtt_driver }, 1581 "Haswell", &haswell_gtt_driver },
1549 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG, 1582 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
1550 "Haswell", &sandybridge_gtt_driver }, 1583 "Haswell", &haswell_gtt_driver },
1551 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG, 1584 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
1552 "Haswell", &sandybridge_gtt_driver }, 1585 "Haswell", &haswell_gtt_driver },
1553 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG, 1586 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
1554 "Haswell", &sandybridge_gtt_driver }, 1587 "Haswell", &haswell_gtt_driver },
1555 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG, 1588 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
1556 "Haswell", &sandybridge_gtt_driver }, 1589 "Haswell", &haswell_gtt_driver },
1557 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG, 1590 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
1558 "Haswell", &sandybridge_gtt_driver }, 1591 "Haswell", &haswell_gtt_driver },
1559 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG, 1592 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
1560 "Haswell", &sandybridge_gtt_driver }, 1593 "Haswell", &haswell_gtt_driver },
1561 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG, 1594 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
1562 "Haswell", &sandybridge_gtt_driver }, 1595 "Haswell", &haswell_gtt_driver },
1563 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG, 1596 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
1564 "Haswell", &sandybridge_gtt_driver }, 1597 "Haswell", &haswell_gtt_driver },
1565 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG, 1598 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
1566 "Haswell", &sandybridge_gtt_driver }, 1599 "Haswell", &haswell_gtt_driver },
1567 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG, 1600 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
1568 "Haswell", &sandybridge_gtt_driver }, 1601 "Haswell", &haswell_gtt_driver },
1569 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG, 1602 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
1570 "Haswell", &sandybridge_gtt_driver }, 1603 "Haswell", &haswell_gtt_driver },
1571 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG, 1604 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
1572 "Haswell", &sandybridge_gtt_driver }, 1605 "Haswell", &haswell_gtt_driver },
1573 { 0, NULL, NULL } 1606 { 0, NULL, NULL }
1574}; 1607};
1575 1608
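The haswell_write_entry() added above keeps the gen6 PTE layout: a 40-bit DMA address is folded into a 32-bit PTE by leaving bits 31:12 in place and packing address bits 39:32 into PTE bits 11:4 (the "addr |= (addr >> 28) & 0xff0" step), with the valid/cache flags in the low bits; only the cache encoding for uncached memory differs (HSW_PTE_UNCACHED is 0). A stand-alone sketch of that packing follows; it is user-space and the address value is just an example.

/* Illustrative re-implementation of the gen6/Haswell PTE address packing used
 * above: physical address bits 39:32 land in PTE bits 11:4, bits 31:12 stay
 * in place, and flag bits occupy the low bits. */
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID	0x1u	/* corresponds to I810_PTE_VALID */
#define PTE_UNCACHED	0x0u	/* HSW_PTE_UNCACHED: no cache bits set */

static uint32_t hsw_pte_encode(uint64_t addr, uint32_t flags)
{
	uint32_t pte = (uint32_t)addr & 0xfffff000u;	/* address bits 31:12 */

	pte |= (uint32_t)((addr >> 28) & 0xff0);	/* bits 39:32 -> PTE 11:4 */
	return pte | flags;
}

int main(void)
{
	uint64_t addr = 0x23ffff0000ull;		/* a page above 4 GiB */
	uint32_t pte = hsw_pte_encode(addr, PTE_UNCACHED | PTE_VALID);

	printf("pte = 0x%08x\n", pte);
	return 0;
}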
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 540795cd0760..d9279385304d 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -53,7 +53,7 @@ static struct cs5535_mfgpt_timer *cs5535_event_clock;
53#define MFGPT_PERIODIC (MFGPT_HZ / HZ) 53#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
54 54
55/* 55/*
56 * The MFPGT timers on the CS5536 provide us with suitable timers to use 56 * The MFGPT timers on the CS5536 provide us with suitable timers to use
57 * as clock event sources - not as good as a HPET or APIC, but certainly 57 * as clock event sources - not as good as a HPET or APIC, but certainly
58 * better than the PIT. This isn't a general purpose MFGPT driver, but 58 * better than the PIT. This isn't a general purpose MFGPT driver, but
59 * a simplified one designed specifically to act as a clock event source. 59 * a simplified one designed specifically to act as a clock event source.
@@ -144,7 +144,7 @@ static int __init cs5535_mfgpt_init(void)
144 144
145 timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING); 145 timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
146 if (!timer) { 146 if (!timer) {
147 printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n"); 147 printk(KERN_ERR DRV_NAME ": Could not allocate MFGPT timer\n");
148 return -ENODEV; 148 return -ENODEV;
149 } 149 }
150 cs5535_event_clock = timer; 150 cs5535_event_clock = timer;
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 17fa04d08be9..b47034e650a5 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -218,7 +218,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
218 218
219 policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); 219 policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
220 220
221 if (atomic_inc_return(&freq_table_users) == 1) 221 if (!freq_table)
222 result = opp_init_cpufreq_table(mpu_dev, &freq_table); 222 result = opp_init_cpufreq_table(mpu_dev, &freq_table);
223 223
224 if (result) { 224 if (result) {
@@ -227,6 +227,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
227 goto fail_ck; 227 goto fail_ck;
228 } 228 }
229 229
230 atomic_inc_return(&freq_table_users);
231
230 result = cpufreq_frequency_table_cpuinfo(policy, freq_table); 232 result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
231 if (result) 233 if (result)
232 goto fail_table; 234 goto fail_table;
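The omap-cpufreq change above builds the frequency table only when it does not exist yet and bumps the user count only after the build succeeded, so a failed opp_init_cpufreq_table() no longer leaves the counter incremented and thereby prevents later CPUs from retrying. A schematic of that ordering, using hypothetical names rather than the driver's real API:

/* Sketch of the init-once-then-count ordering from the hunk above: build the
 * shared table only if missing, and take a reference only after success, so a
 * failure leaves the state clean for the next caller. */
#include <stdio.h>

static int table_users;
static int *freq_table;		/* NULL until the first successful init */

static int build_table(int **table)
{
	static int t[4] = { 300, 600, 800, 1000 };	/* example entries */

	*table = t;
	return 0;		/* 0 on success, non-zero on failure */
}

static int cpu_init(void)
{
	int result = 0;

	if (!freq_table)	/* only the first caller builds the table */
		result = build_table(&freq_table);
	if (result)
		return result;	/* failed: no reference was taken */

	table_users++;		/* count a user only after success */
	return 0;
}

int main(void)
{
	cpu_init();
	cpu_init();
	printf("users=%d first=%d\n", table_users, freq_table[0]);
	return 0;
}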
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 53c8c51d5881..93d14070141a 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -63,7 +63,7 @@ static void caam_jr_dequeue(unsigned long devarg)
63 63
64 head = ACCESS_ONCE(jrp->head); 64 head = ACCESS_ONCE(jrp->head);
65 65
66 spin_lock_bh(&jrp->outlock); 66 spin_lock(&jrp->outlock);
67 67
68 sw_idx = tail = jrp->tail; 68 sw_idx = tail = jrp->tail;
69 hw_idx = jrp->out_ring_read_index; 69 hw_idx = jrp->out_ring_read_index;
@@ -115,7 +115,7 @@ static void caam_jr_dequeue(unsigned long devarg)
115 jrp->tail = tail; 115 jrp->tail = tail;
116 } 116 }
117 117
118 spin_unlock_bh(&jrp->outlock); 118 spin_unlock(&jrp->outlock);
119 119
120 /* Finally, execute user's callback */ 120 /* Finally, execute user's callback */
121 usercall(dev, userdesc, userstatus, userarg); 121 usercall(dev, userdesc, userstatus, userarg);
@@ -236,14 +236,14 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
236 return -EIO; 236 return -EIO;
237 } 237 }
238 238
239 spin_lock(&jrp->inplock); 239 spin_lock_bh(&jrp->inplock);
240 240
241 head = jrp->head; 241 head = jrp->head;
242 tail = ACCESS_ONCE(jrp->tail); 242 tail = ACCESS_ONCE(jrp->tail);
243 243
244 if (!rd_reg32(&jrp->rregs->inpring_avail) || 244 if (!rd_reg32(&jrp->rregs->inpring_avail) ||
245 CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { 245 CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
246 spin_unlock(&jrp->inplock); 246 spin_unlock_bh(&jrp->inplock);
247 dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); 247 dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
248 return -EBUSY; 248 return -EBUSY;
249 } 249 }
@@ -265,7 +265,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
265 265
266 wr_reg32(&jrp->rregs->inpring_jobadd, 1); 266 wr_reg32(&jrp->rregs->inpring_jobadd, 1);
267 267
268 spin_unlock(&jrp->inplock); 268 spin_unlock_bh(&jrp->inplock);
269 269
270 return 0; 270 return 0;
271} 271}
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index c9c4befb5a8d..df14358d7fa1 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -821,8 +821,8 @@ static int hifn_register_rng(struct hifn_device *dev)
821 /* 821 /*
822 * We must wait at least 256 Pk_clk cycles between two reads of the rng. 822 * We must wait at least 256 Pk_clk cycles between two reads of the rng.
823 */ 823 */
824 dev->rng_wait_time = DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) * 824 dev->rng_wait_time = DIV_ROUND_UP_ULL(NSEC_PER_SEC,
825 256; 825 dev->pk_clk_freq) * 256;
826 826
827 dev->rng.name = dev->name; 827 dev->rng.name = dev->name;
828 dev->rng.data_present = hifn_rng_data_present, 828 dev->rng.data_present = hifn_rng_data_present,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 23120c00a881..90e28081712d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -22,6 +22,7 @@ menuconfig DRM
22config DRM_USB 22config DRM_USB
23 tristate 23 tristate
24 depends on DRM 24 depends on DRM
25 depends on USB_ARCH_HAS_HCD
25 select USB 26 select USB
26 27
27config DRM_KMS_HELPER 28config DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 08a7aa722d6b..6fbfc244748f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1981,7 +1981,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
1981 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1981 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1982 return -EINVAL; 1982 return -EINVAL;
1983 1983
1984 if (!req->flags) 1984 if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
1985 return -EINVAL; 1985 return -EINVAL;
1986 1986
1987 mutex_lock(&dev->mode_config.mutex); 1987 mutex_lock(&dev->mode_config.mutex);
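The drm_mode_cursor_ioctl() change above tightens validation: a request is rejected not only when no flags are set but also when it carries bits outside DRM_MODE_CURSOR_FLAGS. The pattern in isolation, with illustrative constants that merely mirror the BO/MOVE flag pair:

/* Illustration of the "reject unknown flag bits" check added above: an empty
 * mask, or any bit outside the known set, is treated as invalid input. */
#include <stdio.h>

#define CURSOR_BO	0x01u
#define CURSOR_MOVE	0x02u
#define CURSOR_FLAGS	(CURSOR_BO | CURSOR_MOVE)	/* all valid bits */

static int cursor_ioctl(unsigned int flags)
{
	if (!flags || (~CURSOR_FLAGS & flags))
		return -1;	/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       cursor_ioctl(CURSOR_BO),		/*  0: valid           */
	       cursor_ioctl(0),			/* -1: no flags        */
	       cursor_ioctl(CURSOR_BO | 0x80));	/* -1: unknown bit set */
	return 0;
}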
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a8743c399e83..b7ee230572b7 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -87,6 +87,9 @@ static struct edid_quirk {
87 int product_id; 87 int product_id;
88 u32 quirks; 88 u32 quirks;
89} edid_quirk_list[] = { 89} edid_quirk_list[] = {
90 /* ASUS VW222S */
91 { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
92
90 /* Acer AL1706 */ 93 /* Acer AL1706 */
91 { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, 94 { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
92 /* Acer F51 */ 95 /* Acer F51 */
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index b7adb4a967fd..28637c181b15 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -706,9 +706,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
706 p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal); 706 p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
707 p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay); 707 p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
708 p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal); 708 p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
709
710 p->crtc_hadjusted = false;
711 p->crtc_vadjusted = false;
712} 709}
713EXPORT_SYMBOL(drm_mode_set_crtcinfo); 710EXPORT_SYMBOL(drm_mode_set_crtcinfo);
714 711
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 371c695322d9..da457b18eaaf 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -89,7 +89,7 @@ static const struct file_operations drm_proc_fops = {
89 * Create a given set of proc files represented by an array of 89 * Create a given set of proc files represented by an array of
90 * gdm_proc_lists in the given root directory. 90 * gdm_proc_lists in the given root directory.
91 */ 91 */
92int drm_proc_create_files(struct drm_info_list *files, int count, 92static int drm_proc_create_files(struct drm_info_list *files, int count,
93 struct proc_dir_entry *root, struct drm_minor *minor) 93 struct proc_dir_entry *root, struct drm_minor *minor)
94{ 94{
95 struct drm_device *dev = minor->dev; 95 struct drm_device *dev = minor->dev;
@@ -172,7 +172,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
172 return 0; 172 return 0;
173} 173}
174 174
175int drm_proc_remove_files(struct drm_info_list *files, int count, 175static int drm_proc_remove_files(struct drm_info_list *files, int count,
176 struct drm_minor *minor) 176 struct drm_minor *minor)
177{ 177{
178 struct list_head *pos, *q; 178 struct list_head *pos, *q;
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 30dc22a7156c..8033526bb53b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -1362,6 +1362,9 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
1362 (struct drm_connector **) (psb_intel_crtc + 1); 1362 (struct drm_connector **) (psb_intel_crtc + 1);
1363 psb_intel_crtc->mode_set.num_connectors = 0; 1363 psb_intel_crtc->mode_set.num_connectors = 0;
1364 psb_intel_cursor_init(dev, psb_intel_crtc); 1364 psb_intel_cursor_init(dev, psb_intel_crtc);
1365
1366 /* Set to true so that the pipe is forced off on initial config. */
1367 psb_intel_crtc->active = true;
1365} 1368}
1366 1369
1367int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 1370int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5c4657a54f97..489e2b162b27 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2365,6 +2365,10 @@ int i915_gpu_idle(struct drm_device *dev)
2365 2365
2366 /* Flush everything onto the inactive list. */ 2366 /* Flush everything onto the inactive list. */
2367 for_each_ring(ring, dev_priv, i) { 2367 for_each_ring(ring, dev_priv, i) {
2368 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2369 if (ret)
2370 return ret;
2371
2368 ret = i915_ring_idle(ring); 2372 ret = i915_ring_idle(ring);
2369 if (ret) 2373 if (ret)
2370 return ret; 2374 return ret;
@@ -2372,10 +2376,6 @@ int i915_gpu_idle(struct drm_device *dev)
2372 /* Is the device fubar? */ 2376 /* Is the device fubar? */
2373 if (WARN_ON(!list_empty(&ring->gpu_write_list))) 2377 if (WARN_ON(!list_empty(&ring->gpu_write_list)))
2374 return -EBUSY; 2378 return -EBUSY;
2375
2376 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2377 if (ret)
2378 return ret;
2379 } 2379 }
2380 2380
2381 return 0; 2381 return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ee9b68f6bc36..60815b861ec2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -72,7 +72,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
72 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 72 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
73 * entries. For aliasing ppgtt support we just steal them at the end for 73 * entries. For aliasing ppgtt support we just steal them at the end for
74 * now. */ 74 * now. */
75 first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES; 75 first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
76 76
77 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 77 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
78 if (!ppgtt) 78 if (!ppgtt)
@@ -261,7 +261,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
261 pte_flags |= GEN6_PTE_CACHE_LLC; 261 pte_flags |= GEN6_PTE_CACHE_LLC;
262 break; 262 break;
263 case I915_CACHE_NONE: 263 case I915_CACHE_NONE:
264 pte_flags |= GEN6_PTE_UNCACHED; 264 if (IS_HASWELL(dev))
265 pte_flags |= HSW_PTE_UNCACHED;
266 else
267 pte_flags |= GEN6_PTE_UNCACHED;
265 break; 268 break;
266 default: 269 default:
267 BUG(); 270 BUG();
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index acc99b21e0b6..28725ce5b82c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -115,6 +115,7 @@
115 115
116#define GEN6_PTE_VALID (1 << 0) 116#define GEN6_PTE_VALID (1 << 0)
117#define GEN6_PTE_UNCACHED (1 << 1) 117#define GEN6_PTE_UNCACHED (1 << 1)
118#define HSW_PTE_UNCACHED (0)
118#define GEN6_PTE_CACHE_LLC (2 << 1) 119#define GEN6_PTE_CACHE_LLC (2 << 1)
119#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) 120#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
120#define GEN6_PTE_CACHE_BITS (3 << 1) 121#define GEN6_PTE_CACHE_BITS (3 << 1)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 7ed4a41c3965..23bdc8cd1458 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -326,6 +326,36 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
326 return ret; 326 return ret;
327} 327}
328 328
329static struct edid *intel_crt_get_edid(struct drm_connector *connector,
330 struct i2c_adapter *i2c)
331{
332 struct edid *edid;
333
334 edid = drm_get_edid(connector, i2c);
335
336 if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
337 DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
338 intel_gmbus_force_bit(i2c, true);
339 edid = drm_get_edid(connector, i2c);
340 intel_gmbus_force_bit(i2c, false);
341 }
342
343 return edid;
344}
345
346/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
347static int intel_crt_ddc_get_modes(struct drm_connector *connector,
348 struct i2c_adapter *adapter)
349{
350 struct edid *edid;
351
352 edid = intel_crt_get_edid(connector, adapter);
353 if (!edid)
354 return 0;
355
356 return intel_connector_update_modes(connector, edid);
357}
358
329static bool intel_crt_detect_ddc(struct drm_connector *connector) 359static bool intel_crt_detect_ddc(struct drm_connector *connector)
330{ 360{
331 struct intel_crt *crt = intel_attached_crt(connector); 361 struct intel_crt *crt = intel_attached_crt(connector);
@@ -336,7 +366,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
336 BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); 366 BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
337 367
338 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); 368 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
339 edid = drm_get_edid(connector, i2c); 369 edid = intel_crt_get_edid(connector, i2c);
340 370
341 if (edid) { 371 if (edid) {
342 bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; 372 bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
@@ -544,13 +574,13 @@ static int intel_crt_get_modes(struct drm_connector *connector)
544 struct i2c_adapter *i2c; 574 struct i2c_adapter *i2c;
545 575
546 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); 576 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
547 ret = intel_ddc_get_modes(connector, i2c); 577 ret = intel_crt_ddc_get_modes(connector, i2c);
548 if (ret || !IS_G4X(dev)) 578 if (ret || !IS_G4X(dev))
549 return ret; 579 return ret;
550 580
551 /* Try to probe digital port for output in DVI-I -> VGA mode. */ 581 /* Try to probe digital port for output in DVI-I -> VGA mode. */
552 i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); 582 i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
553 return intel_ddc_get_modes(connector, i2c); 583 return intel_crt_ddc_get_modes(connector, i2c);
554} 584}
555 585
556static int intel_crt_set_property(struct drm_connector *connector, 586static int intel_crt_set_property(struct drm_connector *connector,
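intel_crt_get_edid() above wraps the normal GMBUS EDID read with a fallback: if the fast path returns nothing and bit-banging was not already forced, it retries once over GPIO bit-banging and then restores the bus. The control flow can be abstracted into a generic try-then-fallback helper; the callbacks and struct below are hypothetical, not the i915 API.

/* Generic form of the retry-with-forced-fallback used by intel_crt_get_edid():
 * try the normal transfer first; on failure, temporarily switch the bus into
 * its fallback mode, retry once, and restore the original mode. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct bus {
	bool forced_fallback;
	/* returns NULL on failure; the bool selects the fallback path */
	void *(*read)(struct bus *bus, bool fallback);
};

static void *read_with_fallback(struct bus *bus)
{
	void *data = bus->read(bus, bus->forced_fallback);

	if (!data && !bus->forced_fallback) {
		bus->forced_fallback = true;	/* force bit-banging */
		data = bus->read(bus, true);
		bus->forced_fallback = false;	/* restore the bus */
	}
	return data;
}

/* toy backend: the fast path always fails, the fallback path succeeds */
static void *toy_read(struct bus *bus, bool fallback)
{
	static char edid[128];
	(void)bus;
	return fallback ? edid : NULL;
}

int main(void)
{
	struct bus b = { .forced_fallback = false, .read = toy_read };

	printf("edid %s\n", read_with_fallback(&b) ? "found" : "missing");
	return 0;
}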
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a69a3d0d3acf..2dfa6cf4886b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1384,7 +1384,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1384 enum pipe pipe, int reg) 1384 enum pipe pipe, int reg)
1385{ 1385{
1386 u32 val = I915_READ(reg); 1386 u32 val = I915_READ(reg);
1387 WARN(hdmi_pipe_enabled(dev_priv, val, pipe), 1387 WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1388 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1388 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1389 reg, pipe_name(pipe)); 1389 reg, pipe_name(pipe));
1390 1390
@@ -1404,13 +1404,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1404 1404
1405 reg = PCH_ADPA; 1405 reg = PCH_ADPA;
1406 val = I915_READ(reg); 1406 val = I915_READ(reg);
1407 WARN(adpa_pipe_enabled(dev_priv, val, pipe), 1407 WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1408 "PCH VGA enabled on transcoder %c, should be disabled\n", 1408 "PCH VGA enabled on transcoder %c, should be disabled\n",
1409 pipe_name(pipe)); 1409 pipe_name(pipe));
1410 1410
1411 reg = PCH_LVDS; 1411 reg = PCH_LVDS;
1412 val = I915_READ(reg); 1412 val = I915_READ(reg);
1413 WARN(lvds_pipe_enabled(dev_priv, val, pipe), 1413 WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1414 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1414 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1415 pipe_name(pipe)); 1415 pipe_name(pipe));
1416 1416
@@ -1872,7 +1872,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1872 enum pipe pipe, int reg) 1872 enum pipe pipe, int reg)
1873{ 1873{
1874 u32 val = I915_READ(reg); 1874 u32 val = I915_READ(reg);
1875 if (hdmi_pipe_enabled(dev_priv, val, pipe)) { 1875 if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
1876 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", 1876 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1877 reg, pipe); 1877 reg, pipe);
1878 I915_WRITE(reg, val & ~PORT_ENABLE); 1878 I915_WRITE(reg, val & ~PORT_ENABLE);
@@ -1894,12 +1894,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1894 1894
1895 reg = PCH_ADPA; 1895 reg = PCH_ADPA;
1896 val = I915_READ(reg); 1896 val = I915_READ(reg);
1897 if (adpa_pipe_enabled(dev_priv, val, pipe)) 1897 if (adpa_pipe_enabled(dev_priv, pipe, val))
1898 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); 1898 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1899 1899
1900 reg = PCH_LVDS; 1900 reg = PCH_LVDS;
1901 val = I915_READ(reg); 1901 val = I915_READ(reg);
1902 if (lvds_pipe_enabled(dev_priv, val, pipe)) { 1902 if (lvds_pipe_enabled(dev_priv, pipe, val)) {
1903 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); 1903 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1904 I915_WRITE(reg, val & ~LVDS_PORT_EN); 1904 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1905 POSTING_READ(reg); 1905 POSTING_READ(reg);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 132ab511b90c..cd54cf88a28f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -342,6 +342,8 @@ struct intel_fbc_work {
342 int interval; 342 int interval;
343}; 343};
344 344
345int intel_connector_update_modes(struct drm_connector *connector,
346 struct edid *edid);
345int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 347int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
346 348
347extern void intel_attach_force_audio_property(struct drm_connector *connector); 349extern void intel_attach_force_audio_property(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e05c0d3e3440..e9a6f6aaed85 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -780,6 +780,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
780 DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), 780 DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
781 }, 781 },
782 }, 782 },
783 {
784 .callback = intel_no_lvds_dmi_callback,
785 .ident = "Gigabyte GA-D525TUD",
786 .matches = {
787 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
788 DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
789 },
790 },
783 791
784 { } /* terminating entry */ 792 { } /* terminating entry */
785}; 793};
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 45848b9b670b..29b72593fbb2 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -33,6 +33,25 @@
33#include "i915_drv.h" 33#include "i915_drv.h"
34 34
35/** 35/**
36 * intel_connector_update_modes - update connector from edid
37 * @connector: DRM connector device to use
38 * @edid: previously read EDID information
39 */
40int intel_connector_update_modes(struct drm_connector *connector,
41 struct edid *edid)
42{
43 int ret;
44
45 drm_mode_connector_update_edid_property(connector, edid);
46 ret = drm_add_edid_modes(connector, edid);
47 drm_edid_to_eld(connector, edid);
48 connector->display_info.raw_edid = NULL;
49 kfree(edid);
50
51 return ret;
52}
53
54/**
36 * intel_ddc_get_modes - get modelist from monitor 55 * intel_ddc_get_modes - get modelist from monitor
37 * @connector: DRM connector device to use 56 * @connector: DRM connector device to use
38 * @adapter: i2c adapter 57 * @adapter: i2c adapter
@@ -43,18 +62,12 @@ int intel_ddc_get_modes(struct drm_connector *connector,
43 struct i2c_adapter *adapter) 62 struct i2c_adapter *adapter)
44{ 63{
45 struct edid *edid; 64 struct edid *edid;
46 int ret = 0;
47 65
48 edid = drm_get_edid(connector, adapter); 66 edid = drm_get_edid(connector, adapter);
49 if (edid) { 67 if (!edid)
50 drm_mode_connector_update_edid_property(connector, edid); 68 return 0;
51 ret = drm_add_edid_modes(connector, edid);
52 drm_edid_to_eld(connector, edid);
53 connector->display_info.raw_edid = NULL;
54 kfree(edid);
55 }
56 69
57 return ret; 70 return intel_connector_update_modes(connector, edid);
58} 71}
59 72
60static const struct drm_prop_enum_list force_audio_names[] = { 73static const struct drm_prop_enum_list force_audio_names[] = {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 58c07cdafb7e..1881c8c83f0e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2441,17 +2441,10 @@ static void gen6_enable_rps(struct drm_device *dev)
2441 dev_priv->max_delay << 24 | 2441 dev_priv->max_delay << 24 |
2442 dev_priv->min_delay << 16); 2442 dev_priv->min_delay << 16);
2443 2443
2444 if (IS_HASWELL(dev)) { 2444 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
2445 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 2445 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
2446 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 2446 I915_WRITE(GEN6_RP_UP_EI, 66000);
2447 I915_WRITE(GEN6_RP_UP_EI, 66000); 2447 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
2448 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
2449 } else {
2450 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
2451 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
2452 I915_WRITE(GEN6_RP_UP_EI, 100000);
2453 I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
2454 }
2455 2448
2456 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 2449 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2457 I915_WRITE(GEN6_RP_CONTROL, 2450 I915_WRITE(GEN6_RP_CONTROL,
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d172e9873131..d81bb0bf2885 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1692,6 +1692,7 @@ static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
1692 edid = intel_sdvo_get_edid(connector); 1692 edid = intel_sdvo_get_edid(connector);
1693 if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL) 1693 if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
1694 has_audio = drm_detect_monitor_audio(edid); 1694 has_audio = drm_detect_monitor_audio(edid);
1695 kfree(edid);
1695 1696
1696 return has_audio; 1697 return has_audio;
1697} 1698}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index cc8df4de2d92..7644f31a3778 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -60,11 +60,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
60 60
61 switch (fb->pixel_format) { 61 switch (fb->pixel_format) {
62 case DRM_FORMAT_XBGR8888: 62 case DRM_FORMAT_XBGR8888:
63 sprctl |= SPRITE_FORMAT_RGBX888; 63 sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
64 pixel_size = 4; 64 pixel_size = 4;
65 break; 65 break;
66 case DRM_FORMAT_XRGB8888: 66 case DRM_FORMAT_XRGB8888:
67 sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; 67 sprctl |= SPRITE_FORMAT_RGBX888;
68 pixel_size = 4; 68 pixel_size = 4;
69 break; 69 break;
70 case DRM_FORMAT_YUYV: 70 case DRM_FORMAT_YUYV:
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index fc841e87b343..26ebffebe710 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -211,11 +211,6 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
211 return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); 211 return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
212} 212}
213 213
214static int nouveau_dsm_init(void)
215{
216 return 0;
217}
218
219static int nouveau_dsm_get_client_id(struct pci_dev *pdev) 214static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
220{ 215{
221 /* easy option one - intel vendor ID means Integrated */ 216 /* easy option one - intel vendor ID means Integrated */
@@ -232,7 +227,6 @@ static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
232static struct vga_switcheroo_handler nouveau_dsm_handler = { 227static struct vga_switcheroo_handler nouveau_dsm_handler = {
233 .switchto = nouveau_dsm_switchto, 228 .switchto = nouveau_dsm_switchto,
234 .power_state = nouveau_dsm_power_state, 229 .power_state = nouveau_dsm_power_state,
235 .init = nouveau_dsm_init,
236 .get_client_id = nouveau_dsm_get_client_id, 230 .get_client_id = nouveau_dsm_get_client_id,
237}; 231};
238 232
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 1866dbb49979..c61014442aa9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -736,9 +736,11 @@ nouveau_card_init(struct drm_device *dev)
736 } 736 }
737 break; 737 break;
738 case NV_C0: 738 case NV_C0:
739 nvc0_copy_create(dev, 1); 739 if (!(nv_rd32(dev, 0x022500) & 0x00000200))
740 nvc0_copy_create(dev, 1);
740 case NV_D0: 741 case NV_D0:
741 nvc0_copy_create(dev, 0); 742 if (!(nv_rd32(dev, 0x022500) & 0x00000100))
743 nvc0_copy_create(dev, 0);
742 break; 744 break;
743 default: 745 default:
744 break; 746 break;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c6fcb5b86a45..2817101fb167 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -258,7 +258,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
258 radeon_crtc->enabled = true; 258 radeon_crtc->enabled = true;
259 /* adjust pm to dpms changes BEFORE enabling crtcs */ 259 /* adjust pm to dpms changes BEFORE enabling crtcs */
260 radeon_pm_compute_clocks(rdev); 260 radeon_pm_compute_clocks(rdev);
261 /* disable crtc pair power gating before programming */
262 if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) 261 if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
263 atombios_powergate_crtc(crtc, ATOM_DISABLE); 262 atombios_powergate_crtc(crtc, ATOM_DISABLE);
264 atombios_enable_crtc(crtc, ATOM_ENABLE); 263 atombios_enable_crtc(crtc, ATOM_ENABLE);
@@ -278,25 +277,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
278 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); 277 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
279 atombios_enable_crtc(crtc, ATOM_DISABLE); 278 atombios_enable_crtc(crtc, ATOM_DISABLE);
280 radeon_crtc->enabled = false; 279 radeon_crtc->enabled = false;
281 /* power gating is per-pair */ 280 if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
282 if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) { 281 atombios_powergate_crtc(crtc, ATOM_ENABLE);
283 struct drm_crtc *other_crtc;
284 struct radeon_crtc *other_radeon_crtc;
285 list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) {
286 other_radeon_crtc = to_radeon_crtc(other_crtc);
287 if (((radeon_crtc->crtc_id == 0) && (other_radeon_crtc->crtc_id == 1)) ||
288 ((radeon_crtc->crtc_id == 1) && (other_radeon_crtc->crtc_id == 0)) ||
289 ((radeon_crtc->crtc_id == 2) && (other_radeon_crtc->crtc_id == 3)) ||
290 ((radeon_crtc->crtc_id == 3) && (other_radeon_crtc->crtc_id == 2)) ||
291 ((radeon_crtc->crtc_id == 4) && (other_radeon_crtc->crtc_id == 5)) ||
292 ((radeon_crtc->crtc_id == 5) && (other_radeon_crtc->crtc_id == 4))) {
293 /* if both crtcs in the pair are off, enable power gating */
294 if (other_radeon_crtc->enabled == false)
295 atombios_powergate_crtc(crtc, ATOM_ENABLE);
296 break;
297 }
298 }
299 }
300 /* adjust pm to dpms changes AFTER disabling crtcs */ 282 /* adjust pm to dpms changes AFTER disabling crtcs */
301 radeon_pm_compute_clocks(rdev); 283 radeon_pm_compute_clocks(rdev);
302 break; 284 break;
@@ -444,11 +426,28 @@ union atom_enable_ss {
444static void atombios_crtc_program_ss(struct radeon_device *rdev, 426static void atombios_crtc_program_ss(struct radeon_device *rdev,
445 int enable, 427 int enable,
446 int pll_id, 428 int pll_id,
429 int crtc_id,
447 struct radeon_atom_ss *ss) 430 struct radeon_atom_ss *ss)
448{ 431{
432 unsigned i;
449 int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); 433 int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
450 union atom_enable_ss args; 434 union atom_enable_ss args;
451 435
436 if (!enable) {
437 for (i = 0; i < rdev->num_crtc; i++) {
438 if (rdev->mode_info.crtcs[i] &&
439 rdev->mode_info.crtcs[i]->enabled &&
440 i != crtc_id &&
441 pll_id == rdev->mode_info.crtcs[i]->pll_id) {
442 /* one other crtc is using this pll don't turn
443 * off spread spectrum as it might turn off
444 * display on active crtc
445 */
446 return;
447 }
448 }
449 }
450
452 memset(&args, 0, sizeof(args)); 451 memset(&args, 0, sizeof(args));
453 452
454 if (ASIC_IS_DCE5(rdev)) { 453 if (ASIC_IS_DCE5(rdev)) {
@@ -1028,7 +1027,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
1028 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 1027 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
1029 &ref_div, &post_div); 1028 &ref_div, &post_div);
1030 1029
1031 atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss); 1030 atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
1032 1031
1033 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 1032 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
1034 encoder_mode, radeon_encoder->encoder_id, mode->clock, 1033 encoder_mode, radeon_encoder->encoder_id, mode->clock,
@@ -1051,7 +1050,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
1051 ss.step = step_size; 1050 ss.step = step_size;
1052 } 1051 }
1053 1052
1054 atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss); 1053 atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
1055 } 1054 }
1056} 1055}
1057 1056
@@ -1572,11 +1571,11 @@ void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
1572 ASIC_INTERNAL_SS_ON_DCPLL, 1571 ASIC_INTERNAL_SS_ON_DCPLL,
1573 rdev->clock.default_dispclk); 1572 rdev->clock.default_dispclk);
1574 if (ss_enabled) 1573 if (ss_enabled)
1575 atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss); 1574 atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
1576 /* XXX: DCE5, make sure voltage, dispclk is high enough */ 1575 /* XXX: DCE5, make sure voltage, dispclk is high enough */
1577 atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk); 1576 atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
1578 if (ss_enabled) 1577 if (ss_enabled)
1579 atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss); 1578 atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
1580 } 1579 }
1581 1580
1582} 1581}
@@ -1665,9 +1664,22 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1665 struct drm_device *dev = crtc->dev; 1664 struct drm_device *dev = crtc->dev;
1666 struct radeon_device *rdev = dev->dev_private; 1665 struct radeon_device *rdev = dev->dev_private;
1667 struct radeon_atom_ss ss; 1666 struct radeon_atom_ss ss;
1667 int i;
1668 1668
1669 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1669 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1670 1670
1671 for (i = 0; i < rdev->num_crtc; i++) {
1672 if (rdev->mode_info.crtcs[i] &&
1673 rdev->mode_info.crtcs[i]->enabled &&
1674 i != radeon_crtc->crtc_id &&
1675 radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
1676 /* one other crtc is using this pll don't turn
1677 * off the pll
1678 */
1679 goto done;
1680 }
1681 }
1682
1671 switch (radeon_crtc->pll_id) { 1683 switch (radeon_crtc->pll_id) {
1672 case ATOM_PPLL1: 1684 case ATOM_PPLL1:
1673 case ATOM_PPLL2: 1685 case ATOM_PPLL2:
@@ -1684,6 +1696,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1684 default: 1696 default:
1685 break; 1697 break;
1686 } 1698 }
1699done:
1687 radeon_crtc->pll_id = -1; 1700 radeon_crtc->pll_id = -1;
1688} 1701}
1689 1702
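Both radeon hunks above apply the same guard before tearing down a shared clock resource: scan the other CRTCs, and if any enabled CRTC other than the current one still uses the same PLL, leave the spread spectrum and the PLL itself alone. A compact sketch of that check, with illustrative array sizes and field names:

/* Sketch of the shared-PLL guard used above: only power down a PLL (or its
 * spread spectrum) when no other enabled CRTC is still driven by it. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_CRTC 6

struct crtc {
	bool enabled;
	int  pll_id;	/* -1 when no PLL is assigned */
};

static bool pll_in_use_elsewhere(const struct crtc *crtcs, int self, int pll_id)
{
	for (int i = 0; i < NUM_CRTC; i++) {
		if (i != self && crtcs[i].enabled && crtcs[i].pll_id == pll_id)
			return true;	/* another active CRTC shares this PLL */
	}
	return false;
}

int main(void)
{
	struct crtc crtcs[NUM_CRTC] = {
		[0] = { .enabled = true,  .pll_id = 1 },
		[1] = { .enabled = true,  .pll_id = 1 },	/* shares PLL 1 */
		[2] = { .enabled = false, .pll_id = 2 },
	};

	/* disabling crtc 0: PLL 1 must stay up because crtc 1 still uses it */
	printf("skip pll teardown: %s\n",
	       pll_in_use_elsewhere(crtcs, 0, crtcs[0].pll_id) ? "yes" : "no");
	return 0;
}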
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 7712cf5ab33b..3623b98ed3fe 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -577,30 +577,25 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
577 struct radeon_device *rdev = dev->dev_private; 577 struct radeon_device *rdev = dev->dev_private;
578 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 578 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
579 int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; 579 int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
580 u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
581 u8 tmp;
580 582
581 if (!ASIC_IS_DCE4(rdev)) 583 if (!ASIC_IS_DCE4(rdev))
582 return panel_mode; 584 return panel_mode;
583 585
584 if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == 586 if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
585 ENCODER_OBJECT_ID_NUTMEG) 587 /* DP bridge chips */
586 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; 588 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
587 else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == 589 if (tmp & 1)
588 ENCODER_OBJECT_ID_TRAVIS) { 590 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
589 u8 id[6]; 591 else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
590 int i; 592 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
591 for (i = 0; i < 6; i++)
592 id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i);
593 if (id[0] == 0x73 &&
594 id[1] == 0x69 &&
595 id[2] == 0x76 &&
596 id[3] == 0x61 &&
597 id[4] == 0x72 &&
598 id[5] == 0x54)
599 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; 593 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
600 else 594 else
601 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 595 panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
602 } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 596 } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
603 u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); 597 /* eDP */
598 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
604 if (tmp & 1) 599 if (tmp & 1)
605 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 600 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
606 } 601 }
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index f9bc27fe269a..6e8803a1170c 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1379,6 +1379,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1379 struct drm_device *dev = encoder->dev; 1379 struct drm_device *dev = encoder->dev;
1380 struct radeon_device *rdev = dev->dev_private; 1380 struct radeon_device *rdev = dev->dev_private;
1381 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1381 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1382 struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
1383 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1382 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1384 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1383 struct radeon_connector *radeon_connector = NULL; 1385 struct radeon_connector *radeon_connector = NULL;
1384 struct radeon_connector_atom_dig *radeon_dig_connector = NULL; 1386 struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
@@ -1390,19 +1392,37 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1390 1392
1391 switch (mode) { 1393 switch (mode) {
1392 case DRM_MODE_DPMS_ON: 1394 case DRM_MODE_DPMS_ON:
1393 /* some early dce3.2 boards have a bug in their transmitter control table */ 1395 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
1394 if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) || 1396 if (!connector)
1395 ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { 1397 dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
1396 if (ASIC_IS_DCE6(rdev)) { 1398 else
1397 /* It seems we need to call ATOM_ENCODER_CMD_SETUP again 1399 dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
1398 * before reenabling encoder on DPMS ON, otherwise we never 1400
1399 * get picture 1401 /* setup and enable the encoder */
1400 */ 1402 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1401 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); 1403 atombios_dig_encoder_setup(encoder,
1404 ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
1405 dig->panel_mode);
1406 if (ext_encoder) {
1407 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
1408 atombios_external_encoder_setup(encoder, ext_encoder,
1409 EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
1402 } 1410 }
1403 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1411 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1404 } else { 1412 } else if (ASIC_IS_DCE4(rdev)) {
1413 /* setup and enable the encoder */
1414 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1415 /* enable the transmitter */
1416 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1405 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1417 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1418 } else {
1419 /* setup and enable the encoder and transmitter */
1420 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
1421 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1422 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1423 /* some early dce3.2 boards have a bug in their transmitter control table */
1424 if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730))
1425 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1406 } 1426 }
1407 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { 1427 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
1408 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1428 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1420,10 +1440,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1420 case DRM_MODE_DPMS_STANDBY: 1440 case DRM_MODE_DPMS_STANDBY:
1421 case DRM_MODE_DPMS_SUSPEND: 1441 case DRM_MODE_DPMS_SUSPEND:
1422 case DRM_MODE_DPMS_OFF: 1442 case DRM_MODE_DPMS_OFF:
1423 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) 1443 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
1444 /* disable the transmitter */
1424 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 1445 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1425 else 1446 } else if (ASIC_IS_DCE4(rdev)) {
1447 /* disable the transmitter */
1448 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1449 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1450 } else {
1451 /* disable the encoder and transmitter */
1426 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); 1452 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1453 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1454 atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
1455 }
1427 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { 1456 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
1428 if (ASIC_IS_DCE4(rdev)) 1457 if (ASIC_IS_DCE4(rdev))
1429 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); 1458 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
@@ -1740,13 +1769,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1740 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 1769 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1741 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1770 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1742 struct drm_encoder *test_encoder; 1771 struct drm_encoder *test_encoder;
1743 struct radeon_encoder_atom_dig *dig; 1772 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1744 uint32_t dig_enc_in_use = 0; 1773 uint32_t dig_enc_in_use = 0;
1745 1774
1746 /* DCE4/5 */ 1775 if (ASIC_IS_DCE6(rdev)) {
1747 if (ASIC_IS_DCE4(rdev)) { 1776 /* DCE6 */
1748 dig = radeon_encoder->enc_priv; 1777 switch (radeon_encoder->encoder_id) {
1749 if (ASIC_IS_DCE41(rdev)) { 1778 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1779 if (dig->linkb)
1780 return 1;
1781 else
1782 return 0;
1783 break;
1784 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1785 if (dig->linkb)
1786 return 3;
1787 else
1788 return 2;
1789 break;
1790 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1791 if (dig->linkb)
1792 return 5;
1793 else
1794 return 4;
1795 break;
1796 }
1797 } else if (ASIC_IS_DCE4(rdev)) {
1798 /* DCE4/5 */
1799 if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
1750 /* ontario follows DCE4 */ 1800 /* ontario follows DCE4 */
1751 if (rdev->family == CHIP_PALM) { 1801 if (rdev->family == CHIP_PALM) {
1752 if (dig->linkb) 1802 if (dig->linkb)
@@ -1848,10 +1898,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1848 struct drm_device *dev = encoder->dev; 1898 struct drm_device *dev = encoder->dev;
1849 struct radeon_device *rdev = dev->dev_private; 1899 struct radeon_device *rdev = dev->dev_private;
1850 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1900 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1851 struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
1852 1901
1853 radeon_encoder->pixel_clock = adjusted_mode->clock; 1902 radeon_encoder->pixel_clock = adjusted_mode->clock;
1854 1903
1904 /* need to call this here rather than in prepare() since we need some crtc info */
1905 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1906
1855 if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { 1907 if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
1856 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) 1908 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
1857 atombios_yuv_setup(encoder, true); 1909 atombios_yuv_setup(encoder, true);
@@ -1870,38 +1922,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1870 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1922 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1871 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1923 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1872 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1924 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1873 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { 1925 /* handled in dpms */
1874 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1875 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1876
1877 if (!connector)
1878 dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
1879 else
1880 dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
1881
1882 /* setup and enable the encoder */
1883 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1884 atombios_dig_encoder_setup(encoder,
1885 ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
1886 dig->panel_mode);
1887 } else if (ASIC_IS_DCE4(rdev)) {
1888 /* disable the transmitter */
1889 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1890 /* setup and enable the encoder */
1891 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1892
1893 /* enable the transmitter */
1894 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1895 } else {
1896 /* disable the encoder and transmitter */
1897 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1898 atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
1899
1900 /* setup and enable the encoder and transmitter */
1901 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
1902 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1903 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1904 }
1905 break; 1926 break;
1906 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1927 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1907 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1928 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
@@ -1922,14 +1943,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1922 break; 1943 break;
1923 } 1944 }
1924 1945
1925 if (ext_encoder) {
1926 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
1927 atombios_external_encoder_setup(encoder, ext_encoder,
1928 EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
1929 else
1930 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1931 }
1932
1933 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1946 atombios_apply_encoder_quirks(encoder, adjusted_mode);
1934 1947
1935 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { 1948 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
@@ -2116,7 +2129,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
2116 } 2129 }
2117 2130
2118 radeon_atom_output_lock(encoder, true); 2131 radeon_atom_output_lock(encoder, true);
2119 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2120 2132
2121 if (connector) { 2133 if (connector) {
2122 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 2134 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -2137,6 +2149,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
2137 2149
2138static void radeon_atom_encoder_commit(struct drm_encoder *encoder) 2150static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
2139{ 2151{
2152 /* need to call this here as we need the crtc set up */
2140 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON); 2153 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
2141 radeon_atom_output_lock(encoder, false); 2154 radeon_atom_output_lock(encoder, false);
2142} 2155}
@@ -2177,14 +2190,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
2177 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2190 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2178 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2191 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2179 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 2192 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
2180 if (ASIC_IS_DCE4(rdev)) 2193 /* handled in dpms */
2181 /* disable the transmitter */
2182 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
2183 else {
2184 /* disable the encoder and transmitter */
2185 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
2186 atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
2187 }
2188 break; 2194 break;
2189 case ENCODER_OBJECT_ID_INTERNAL_DDI: 2195 case ENCODER_OBJECT_ID_INTERNAL_DDI:
2190 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 2196 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 3dab49cb1d4a..f37676d7f217 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -47,18 +47,23 @@ struct r600_cs_track {
47 u32 npipes; 47 u32 npipes;
48 /* value we track */ 48 /* value we track */
49 u32 sq_config; 49 u32 sq_config;
50 u32 log_nsamples;
50 u32 nsamples; 51 u32 nsamples;
51 u32 cb_color_base_last[8]; 52 u32 cb_color_base_last[8];
52 struct radeon_bo *cb_color_bo[8]; 53 struct radeon_bo *cb_color_bo[8];
53 u64 cb_color_bo_mc[8]; 54 u64 cb_color_bo_mc[8];
54 u32 cb_color_bo_offset[8]; 55 u64 cb_color_bo_offset[8];
55 struct radeon_bo *cb_color_frag_bo[8]; /* unused */ 56 struct radeon_bo *cb_color_frag_bo[8];
56 struct radeon_bo *cb_color_tile_bo[8]; /* unused */ 57 u64 cb_color_frag_offset[8];
58 struct radeon_bo *cb_color_tile_bo[8];
59 u64 cb_color_tile_offset[8];
60 u32 cb_color_mask[8];
57 u32 cb_color_info[8]; 61 u32 cb_color_info[8];
58 u32 cb_color_view[8]; 62 u32 cb_color_view[8];
59 u32 cb_color_size_idx[8]; /* unused */ 63 u32 cb_color_size_idx[8]; /* unused */
60 u32 cb_target_mask; 64 u32 cb_target_mask;
61 u32 cb_shader_mask; /* unused */ 65 u32 cb_shader_mask; /* unused */
66 bool is_resolve;
62 u32 cb_color_size[8]; 67 u32 cb_color_size[8];
63 u32 vgt_strmout_en; 68 u32 vgt_strmout_en;
64 u32 vgt_strmout_buffer_en; 69 u32 vgt_strmout_buffer_en;
@@ -311,7 +316,15 @@ static void r600_cs_track_init(struct r600_cs_track *track)
311 track->cb_color_bo[i] = NULL; 316 track->cb_color_bo[i] = NULL;
312 track->cb_color_bo_offset[i] = 0xFFFFFFFF; 317 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
313 track->cb_color_bo_mc[i] = 0xFFFFFFFF; 318 track->cb_color_bo_mc[i] = 0xFFFFFFFF;
314 } 319 track->cb_color_frag_bo[i] = NULL;
320 track->cb_color_frag_offset[i] = 0xFFFFFFFF;
321 track->cb_color_tile_bo[i] = NULL;
322 track->cb_color_tile_offset[i] = 0xFFFFFFFF;
323 track->cb_color_mask[i] = 0xFFFFFFFF;
324 }
325 track->is_resolve = false;
326 track->nsamples = 16;
327 track->log_nsamples = 4;
315 track->cb_target_mask = 0xFFFFFFFF; 328 track->cb_target_mask = 0xFFFFFFFF;
316 track->cb_shader_mask = 0xFFFFFFFF; 329 track->cb_shader_mask = 0xFFFFFFFF;
317 track->cb_dirty = true; 330 track->cb_dirty = true;
@@ -348,11 +361,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
348 volatile u32 *ib = p->ib.ptr; 361 volatile u32 *ib = p->ib.ptr;
349 unsigned array_mode; 362 unsigned array_mode;
350 u32 format; 363 u32 format;
364 /* When resolve is used, the second colorbuffer has always 1 sample. */
365 unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
351 366
352 if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
353 dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
354 return -EINVAL;
355 }
356 size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; 367 size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
357 format = G_0280A0_FORMAT(track->cb_color_info[i]); 368 format = G_0280A0_FORMAT(track->cb_color_info[i]);
358 if (!r600_fmt_is_valid_color(format)) { 369 if (!r600_fmt_is_valid_color(format)) {
@@ -375,7 +386,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
375 array_check.group_size = track->group_size; 386 array_check.group_size = track->group_size;
376 array_check.nbanks = track->nbanks; 387 array_check.nbanks = track->nbanks;
377 array_check.npipes = track->npipes; 388 array_check.npipes = track->npipes;
378 array_check.nsamples = track->nsamples; 389 array_check.nsamples = nsamples;
379 array_check.blocksize = r600_fmt_get_blocksize(format); 390 array_check.blocksize = r600_fmt_get_blocksize(format);
380 if (r600_get_array_mode_alignment(&array_check, 391 if (r600_get_array_mode_alignment(&array_check,
381 &pitch_align, &height_align, &depth_align, &base_align)) { 392 &pitch_align, &height_align, &depth_align, &base_align)) {
@@ -420,7 +431,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
420 } 431 }
421 432
422 /* check offset */ 433 /* check offset */
423 tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format); 434 tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
435 r600_fmt_get_blocksize(format) * nsamples;
424 switch (array_mode) { 436 switch (array_mode) {
425 default: 437 default:
426 case V_0280A0_ARRAY_LINEAR_GENERAL: 438 case V_0280A0_ARRAY_LINEAR_GENERAL:
@@ -441,7 +453,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
441 * broken userspace. 453 * broken userspace.
442 */ 454 */
443 } else { 455 } else {
444 dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n", 456 dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
445 __func__, i, array_mode, 457 __func__, i, array_mode,
446 track->cb_color_bo_offset[i], tmp, 458 track->cb_color_bo_offset[i], tmp,
447 radeon_bo_size(track->cb_color_bo[i]), 459 radeon_bo_size(track->cb_color_bo[i]),
@@ -458,6 +470,51 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
458 tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | 470 tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
459 S_028060_SLICE_TILE_MAX(slice_tile_max - 1); 471 S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
460 ib[track->cb_color_size_idx[i]] = tmp; 472 ib[track->cb_color_size_idx[i]] = tmp;
473
474 /* FMASK/CMASK */
475 switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
476 case V_0280A0_TILE_DISABLE:
477 break;
478 case V_0280A0_FRAG_ENABLE:
479 if (track->nsamples > 1) {
480 uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
481 /* the tile size is 8x8, but the size is in units of bits.
482 * for bytes, do just * 8. */
483 uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
484
485 if (bytes + track->cb_color_frag_offset[i] >
486 radeon_bo_size(track->cb_color_frag_bo[i])) {
487 dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
488 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
489 __func__, tile_max, bytes,
490 track->cb_color_frag_offset[i],
491 radeon_bo_size(track->cb_color_frag_bo[i]));
492 return -EINVAL;
493 }
494 }
495 /* fall through */
496 case V_0280A0_CLEAR_ENABLE:
497 {
498 uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
499 /* One block = 128x128 pixels, one 8x8 tile has 4 bits..
500 * (128*128) / (8*8) / 2 = 128 bytes per block. */
501 uint32_t bytes = (block_max + 1) * 128;
502
503 if (bytes + track->cb_color_tile_offset[i] >
504 radeon_bo_size(track->cb_color_tile_bo[i])) {
505 dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
506 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
507 __func__, block_max, bytes,
508 track->cb_color_tile_offset[i],
509 radeon_bo_size(track->cb_color_tile_bo[i]));
510 return -EINVAL;
511 }
512 break;
513 }
514 default:
515 dev_warn(p->dev, "%s invalid tile mode\n", __func__);
516 return -EINVAL;
517 }
461 return 0; 518 return 0;
462} 519}
463 520
@@ -566,7 +623,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
566 623
567 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 624 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
568 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 625 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
569 tmp = ntiles * bpe * 64 * nviews; 626 tmp = ntiles * bpe * 64 * nviews * track->nsamples;
570 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 627 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
571 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", 628 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
572 array_mode, 629 array_mode,
@@ -746,6 +803,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
746 */ 803 */
747 if (track->cb_dirty) { 804 if (track->cb_dirty) {
748 tmp = track->cb_target_mask; 805 tmp = track->cb_target_mask;
806
807 /* We must check both colorbuffers for RESOLVE. */
808 if (track->is_resolve) {
809 tmp |= 0xff;
810 }
811
749 for (i = 0; i < 8; i++) { 812 for (i = 0; i < 8; i++) {
750 if ((tmp >> (i * 4)) & 0xF) { 813 if ((tmp >> (i * 4)) & 0xF) {
751 /* at least one component is enabled */ 814 /* at least one component is enabled */
@@ -1231,9 +1294,15 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1231 break; 1294 break;
1232 case R_028C04_PA_SC_AA_CONFIG: 1295 case R_028C04_PA_SC_AA_CONFIG:
1233 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); 1296 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
1297 track->log_nsamples = tmp;
1234 track->nsamples = 1 << tmp; 1298 track->nsamples = 1 << tmp;
1235 track->cb_dirty = true; 1299 track->cb_dirty = true;
1236 break; 1300 break;
1301 case R_028808_CB_COLOR_CONTROL:
1302 tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
1303 track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
1304 track->cb_dirty = true;
1305 break;
1237 case R_0280A0_CB_COLOR0_INFO: 1306 case R_0280A0_CB_COLOR0_INFO:
1238 case R_0280A4_CB_COLOR1_INFO: 1307 case R_0280A4_CB_COLOR1_INFO:
1239 case R_0280A8_CB_COLOR2_INFO: 1308 case R_0280A8_CB_COLOR2_INFO:
@@ -1312,16 +1381,21 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1312 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1381 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1313 return -EINVAL; 1382 return -EINVAL;
1314 } 1383 }
1315 ib[idx] = track->cb_color_base_last[tmp];
1316 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp]; 1384 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
1385 track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
1386 ib[idx] = track->cb_color_base_last[tmp];
1317 } else { 1387 } else {
1318 r = r600_cs_packet_next_reloc(p, &reloc); 1388 r = r600_cs_packet_next_reloc(p, &reloc);
1319 if (r) { 1389 if (r) {
1320 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1390 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1321 return -EINVAL; 1391 return -EINVAL;
1322 } 1392 }
1323 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1324 track->cb_color_frag_bo[tmp] = reloc->robj; 1393 track->cb_color_frag_bo[tmp] = reloc->robj;
1394 track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
1395 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1396 }
1397 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1398 track->cb_dirty = true;
1325 } 1399 }
1326 break; 1400 break;
1327 case R_0280C0_CB_COLOR0_TILE: 1401 case R_0280C0_CB_COLOR0_TILE:
@@ -1338,16 +1412,35 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1338 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1412 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1339 return -EINVAL; 1413 return -EINVAL;
1340 } 1414 }
1341 ib[idx] = track->cb_color_base_last[tmp];
1342 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp]; 1415 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
1416 track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
1417 ib[idx] = track->cb_color_base_last[tmp];
1343 } else { 1418 } else {
1344 r = r600_cs_packet_next_reloc(p, &reloc); 1419 r = r600_cs_packet_next_reloc(p, &reloc);
1345 if (r) { 1420 if (r) {
1346 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1421 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1347 return -EINVAL; 1422 return -EINVAL;
1348 } 1423 }
1349 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1350 track->cb_color_tile_bo[tmp] = reloc->robj; 1424 track->cb_color_tile_bo[tmp] = reloc->robj;
1425 track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
1426 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1427 }
1428 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1429 track->cb_dirty = true;
1430 }
1431 break;
1432 case R_028100_CB_COLOR0_MASK:
1433 case R_028104_CB_COLOR1_MASK:
1434 case R_028108_CB_COLOR2_MASK:
1435 case R_02810C_CB_COLOR3_MASK:
1436 case R_028110_CB_COLOR4_MASK:
1437 case R_028114_CB_COLOR5_MASK:
1438 case R_028118_CB_COLOR6_MASK:
1439 case R_02811C_CB_COLOR7_MASK:
1440 tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
1441 track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
1442 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1443 track->cb_dirty = true;
1351 } 1444 }
1352 break; 1445 break;
1353 case CB_COLOR0_BASE: 1446 case CB_COLOR0_BASE:
@@ -1492,7 +1585,7 @@ unsigned r600_mip_minify(unsigned size, unsigned level)
1492} 1585}
1493 1586
1494static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, 1587static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
1495 unsigned w0, unsigned h0, unsigned d0, unsigned format, 1588 unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
1496 unsigned block_align, unsigned height_align, unsigned base_align, 1589 unsigned block_align, unsigned height_align, unsigned base_align,
1497 unsigned *l0_size, unsigned *mipmap_size) 1590 unsigned *l0_size, unsigned *mipmap_size)
1498{ 1591{
@@ -1520,7 +1613,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
1520 1613
1521 depth = r600_mip_minify(d0, i); 1614 depth = r600_mip_minify(d0, i);
1522 1615
1523 size = nbx * nby * blocksize; 1616 size = nbx * nby * blocksize * nsamples;
1524 if (nfaces) 1617 if (nfaces)
1525 size *= nfaces; 1618 size *= nfaces;
1526 else 1619 else
@@ -1672,7 +1765,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1672 1765
1673 nfaces = larray - barray + 1; 1766 nfaces = larray - barray + 1;
1674 } 1767 }
1675 r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format, 1768 r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
1676 pitch_align, height_align, base_align, 1769 pitch_align, height_align, base_align,
1677 &l0_size, &mipmap_size); 1770 &l0_size, &mipmap_size);
1678 /* using get ib will give us the offset into the texture bo */ 1771 /* using get ib will give us the offset into the texture bo */
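[Annotation, not part of the patch] The FMASK/CMASK bounds checks added to r600_cs_track_validate_cb() above reduce to two size formulas: a CMASK block covers 128x128 pixels at 4 bits per 8x8 tile, i.e. (128*128)/(8*8)/2 = 128 bytes per block, and an FMASK 8x8 tile takes nsamples * log2(nsamples) * 64 bits, i.e. * 8 for bytes. A standalone sketch of that arithmetic follows; the helper names cmask_fits() and fmask_fits() are invented here purely for illustration.

/* Illustrative sketch only -- mirrors the size checks the patch adds,
 * outside of the CS checker; not kernel code. */
#include <stdbool.h>
#include <stdint.h>

/* CMASK: (block_max + 1) blocks, 128 bytes each. */
static bool cmask_fits(uint32_t block_max, uint64_t offset, uint64_t bo_size)
{
	uint64_t bytes = (uint64_t)(block_max + 1) * 128;

	return bytes + offset <= bo_size;
}

/* FMASK: (tile_max + 1) 8x8 tiles, nsamples * log2(nsamples) * 8 bytes each. */
static bool fmask_fits(uint32_t nsamples, uint32_t log_nsamples,
		       uint32_t tile_max, uint64_t offset, uint64_t bo_size)
{
	uint64_t bytes = (uint64_t)nsamples * log_nsamples * 8 * (tile_max + 1);

	return bytes + offset <= bo_size;
}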
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index fd328f4c3ea8..fa6f37099ba9 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -66,6 +66,14 @@
66 #define CC_RB_BACKEND_DISABLE 0x98F4 66 #define CC_RB_BACKEND_DISABLE 0x98F4
67 #define BACKEND_DISABLE(x) ((x) << 16) 67 #define BACKEND_DISABLE(x) ((x) << 16)
68 68
69 #define R_028808_CB_COLOR_CONTROL 0x28808
70 #define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4)
71 #define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7)
72 #define C_028808_SPECIAL_OP 0xFFFFFF8F
73 #define V_028808_SPECIAL_NORMAL 0x00
74 #define V_028808_SPECIAL_DISABLE 0x01
75 #define V_028808_SPECIAL_RESOLVE_BOX 0x07
76
69 #define CB_COLOR0_BASE 0x28040 77 #define CB_COLOR0_BASE 0x28040
70 #define CB_COLOR1_BASE 0x28044 78 #define CB_COLOR1_BASE 0x28044
71 #define CB_COLOR2_BASE 0x28048 79 #define CB_COLOR2_BASE 0x28048
@@ -92,6 +100,20 @@
92 #define R_028094_CB_COLOR5_VIEW 0x028094 100 #define R_028094_CB_COLOR5_VIEW 0x028094
93 #define R_028098_CB_COLOR6_VIEW 0x028098 101 #define R_028098_CB_COLOR6_VIEW 0x028098
94 #define R_02809C_CB_COLOR7_VIEW 0x02809C 102 #define R_02809C_CB_COLOR7_VIEW 0x02809C
103 #define R_028100_CB_COLOR0_MASK 0x028100
104 #define S_028100_CMASK_BLOCK_MAX(x) (((x) & 0xFFF) << 0)
105 #define G_028100_CMASK_BLOCK_MAX(x) (((x) >> 0) & 0xFFF)
106 #define C_028100_CMASK_BLOCK_MAX 0xFFFFF000
107 #define S_028100_FMASK_TILE_MAX(x) (((x) & 0xFFFFF) << 12)
108 #define G_028100_FMASK_TILE_MAX(x) (((x) >> 12) & 0xFFFFF)
109 #define C_028100_FMASK_TILE_MAX 0x00000FFF
110 #define R_028104_CB_COLOR1_MASK 0x028104
111 #define R_028108_CB_COLOR2_MASK 0x028108
112 #define R_02810C_CB_COLOR3_MASK 0x02810C
113 #define R_028110_CB_COLOR4_MASK 0x028110
114 #define R_028114_CB_COLOR5_MASK 0x028114
115 #define R_028118_CB_COLOR6_MASK 0x028118
116 #define R_02811C_CB_COLOR7_MASK 0x02811C
95 #define CB_COLOR0_INFO 0x280a0 117 #define CB_COLOR0_INFO 0x280a0
96 # define CB_FORMAT(x) ((x) << 2) 118 # define CB_FORMAT(x) ((x) << 2)
97 # define CB_ARRAY_MODE(x) ((x) << 8) 119 # define CB_ARRAY_MODE(x) ((x) << 8)
@@ -1400,6 +1422,9 @@
1400 #define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18) 1422 #define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
1401 #define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3) 1423 #define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
1402 #define C_0280A0_TILE_MODE 0xFFF3FFFF 1424 #define C_0280A0_TILE_MODE 0xFFF3FFFF
1425 #define V_0280A0_TILE_DISABLE 0
1426 #define V_0280A0_CLEAR_ENABLE 1
1427 #define V_0280A0_FRAG_ENABLE 2
1403 #define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20) 1428 #define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
1404 #define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1) 1429 #define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
1405 #define C_0280A0_BLEND_CLAMP 0xFFEFFFFF 1430 #define C_0280A0_BLEND_CLAMP 0xFFEFFFFF
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 99304194a65c..59a15315ae9f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -142,21 +142,6 @@ struct radeon_device;
142 /* 142 /*
143 * BIOS. 143 * BIOS.
144 */ 144 */
145 #define ATRM_BIOS_PAGE 4096
146
147 #if defined(CONFIG_VGA_SWITCHEROO)
148 bool radeon_atrm_supported(struct pci_dev *pdev);
149 int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
150 #else
151 static inline bool radeon_atrm_supported(struct pci_dev *pdev)
152 {
153 	return false;
154 }
155
156 static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
157 	return -EINVAL;
158 }
159 #endif
160 bool radeon_get_bios(struct radeon_device *rdev); 145 bool radeon_get_bios(struct radeon_device *rdev);
161 146
162 /* 147 /*
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f9c21f9d16bc..d67d4f3eb6f4 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -452,7 +452,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
452 } 452 }
453 453
454 /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ 454 /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
455 if ((dev->pdev->device == 0x9802) && 455 if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
456 (dev->pdev->subsystem_vendor == 0x1734) && 456 (dev->pdev->subsystem_vendor == 0x1734) &&
457 (dev->pdev->subsystem_device == 0x11bd)) { 457 (dev->pdev->subsystem_device == 0x11bd)) {
458 if (*connector_type == DRM_MODE_CONNECTOR_VGA) { 458 if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 98724fcb0088..2a2cf0b88a28 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -30,57 +30,8 @@ static struct radeon_atpx_priv {
30 /* handle for device - and atpx */ 30 /* handle for device - and atpx */
31 acpi_handle dhandle; 31 acpi_handle dhandle;
32 acpi_handle atpx_handle; 32 acpi_handle atpx_handle;
33 acpi_handle atrm_handle;
34} radeon_atpx_priv; 33} radeon_atpx_priv;
35 34
36/* retrieve the ROM in 4k blocks */
37static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
38 int offset, int len)
39{
40 acpi_status status;
41 union acpi_object atrm_arg_elements[2], *obj;
42 struct acpi_object_list atrm_arg;
43 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
44
45 atrm_arg.count = 2;
46 atrm_arg.pointer = &atrm_arg_elements[0];
47
48 atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
49 atrm_arg_elements[0].integer.value = offset;
50
51 atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
52 atrm_arg_elements[1].integer.value = len;
53
54 status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
55 if (ACPI_FAILURE(status)) {
56 printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
57 return -ENODEV;
58 }
59
60 obj = (union acpi_object *)buffer.pointer;
61 memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
62 len = obj->buffer.length;
63 kfree(buffer.pointer);
64 return len;
65}
66
67bool radeon_atrm_supported(struct pci_dev *pdev)
68{
69 /* get the discrete ROM only via ATRM */
70 if (!radeon_atpx_priv.atpx_detected)
71 return false;
72
73 if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
74 return false;
75 return true;
76}
77
78
79int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
80{
81 return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
82}
83
84static int radeon_atpx_get_version(acpi_handle handle) 35static int radeon_atpx_get_version(acpi_handle handle)
85{ 36{
86 acpi_status status; 37 acpi_status status;
@@ -198,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
198 149
199static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) 150static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
200{ 151{
201 acpi_handle dhandle, atpx_handle, atrm_handle; 152 acpi_handle dhandle, atpx_handle;
202 acpi_status status; 153 acpi_status status;
203 154
204 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); 155 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
@@ -209,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
209 if (ACPI_FAILURE(status)) 160 if (ACPI_FAILURE(status))
210 return false; 161 return false;
211 162
212 status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
213 if (ACPI_FAILURE(status))
214 return false;
215
216 radeon_atpx_priv.dhandle = dhandle; 163 radeon_atpx_priv.dhandle = dhandle;
217 radeon_atpx_priv.atpx_handle = atpx_handle; 164 radeon_atpx_priv.atpx_handle = atpx_handle;
218 radeon_atpx_priv.atrm_handle = atrm_handle;
219 return true; 165 return true;
220} 166}
221 167
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 501f4881e5aa..d306cc8fdeaa 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -32,6 +32,7 @@
32 32
33 #include <linux/vga_switcheroo.h> 33 #include <linux/vga_switcheroo.h>
34 #include <linux/slab.h> 34 #include <linux/slab.h>
35 #include <linux/acpi.h>
35 /* 36 /*
36 * BIOS. 37 * BIOS.
37 */ 38 */
@@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev)
98 return true; 99 return true;
99} 100}
100 101
102#ifdef CONFIG_ACPI
101/* ATRM is used to get the BIOS on the discrete cards in 103/* ATRM is used to get the BIOS on the discrete cards in
102 * dual-gpu systems. 104 * dual-gpu systems.
103 */ 105 */
106/* retrieve the ROM in 4k blocks */
107#define ATRM_BIOS_PAGE 4096
108/**
109 * radeon_atrm_call - fetch a chunk of the vbios
110 *
111 * @atrm_handle: acpi ATRM handle
112 * @bios: vbios image pointer
113 * @offset: offset of vbios image data to fetch
114 * @len: length of vbios image data to fetch
115 *
116 * Executes ATRM to fetch a chunk of the discrete
117 * vbios image on PX systems (all asics).
118 * Returns the length of the buffer fetched.
119 */
120static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
121 int offset, int len)
122{
123 acpi_status status;
124 union acpi_object atrm_arg_elements[2], *obj;
125 struct acpi_object_list atrm_arg;
126 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
127
128 atrm_arg.count = 2;
129 atrm_arg.pointer = &atrm_arg_elements[0];
130
131 atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
132 atrm_arg_elements[0].integer.value = offset;
133
134 atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
135 atrm_arg_elements[1].integer.value = len;
136
137 status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
138 if (ACPI_FAILURE(status)) {
139 printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
140 return -ENODEV;
141 }
142
143 obj = (union acpi_object *)buffer.pointer;
144 memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
145 len = obj->buffer.length;
146 kfree(buffer.pointer);
147 return len;
148}
149
104static bool radeon_atrm_get_bios(struct radeon_device *rdev) 150static bool radeon_atrm_get_bios(struct radeon_device *rdev)
105{ 151{
106 int ret; 152 int ret;
107 int size = 256 * 1024; 153 int size = 256 * 1024;
108 int i; 154 int i;
155 struct pci_dev *pdev = NULL;
156 acpi_handle dhandle, atrm_handle;
157 acpi_status status;
158 bool found = false;
159
160 /* ATRM is for the discrete card only */
161 if (rdev->flags & RADEON_IS_IGP)
162 return false;
163
164 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
165 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
166 if (!dhandle)
167 continue;
168
169 status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
170 if (!ACPI_FAILURE(status)) {
171 found = true;
172 break;
173 }
174 }
109 175
110 if (!radeon_atrm_supported(rdev->pdev)) 176 if (!found)
111 return false; 177 return false;
112 178
113 rdev->bios = kmalloc(size, GFP_KERNEL); 179 rdev->bios = kmalloc(size, GFP_KERNEL);
@@ -117,9 +183,10 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
117 } 183 }
118 184
119 for (i = 0; i < size / ATRM_BIOS_PAGE; i++) { 185 for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
120 ret = radeon_atrm_get_bios_chunk(rdev->bios, 186 ret = radeon_atrm_call(atrm_handle,
121 (i * ATRM_BIOS_PAGE), 187 rdev->bios,
122 ATRM_BIOS_PAGE); 188 (i * ATRM_BIOS_PAGE),
189 ATRM_BIOS_PAGE);
123 if (ret < ATRM_BIOS_PAGE) 190 if (ret < ATRM_BIOS_PAGE)
124 break; 191 break;
125 } 192 }
@@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
130 } 197 }
131 return true; 198 return true;
132} 199}
200#else
201static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
202{
203 return false;
204}
205#endif
133 206
134static bool ni_read_disabled_bios(struct radeon_device *rdev) 207static bool ni_read_disabled_bios(struct radeon_device *rdev)
135{ 208{
@@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
476 return legacy_read_disabled_bios(rdev); 549 return legacy_read_disabled_bios(rdev);
477} 550}
478 551
552#ifdef CONFIG_ACPI
553static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
554{
555 bool ret = false;
556 struct acpi_table_header *hdr;
557 acpi_size tbl_size;
558 UEFI_ACPI_VFCT *vfct;
559 GOP_VBIOS_CONTENT *vbios;
560 VFCT_IMAGE_HEADER *vhdr;
561
562 if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
563 return false;
564 if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
565 DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
566 goto out_unmap;
567 }
568
569 vfct = (UEFI_ACPI_VFCT *)hdr;
570 if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
571 DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
572 goto out_unmap;
573 }
574
575 vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
576 vhdr = &vbios->VbiosHeader;
577 DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
578 vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
579 vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
580
581 if (vhdr->PCIBus != rdev->pdev->bus->number ||
582 vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
583 vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
584 vhdr->VendorID != rdev->pdev->vendor ||
585 vhdr->DeviceID != rdev->pdev->device) {
586 DRM_INFO("ACPI VFCT table is not for this card\n");
587 goto out_unmap;
588 };
589
590 if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
591 DRM_ERROR("ACPI VFCT image truncated\n");
592 goto out_unmap;
593 }
594
595 rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
596 ret = !!rdev->bios;
597
598out_unmap:
599 return ret;
600}
601#else
602static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
603{
604 return false;
605}
606#endif
479 607
480bool radeon_get_bios(struct radeon_device *rdev) 608bool radeon_get_bios(struct radeon_device *rdev)
481{ 609{
@@ -484,6 +612,8 @@ bool radeon_get_bios(struct radeon_device *rdev)
484 612
485 r = radeon_atrm_get_bios(rdev); 613 r = radeon_atrm_get_bios(rdev);
486 if (r == false) 614 if (r == false)
615 r = radeon_acpi_vfct_bios(rdev);
616 if (r == false)
487 r = igp_read_bios_from_vram(rdev); 617 r = igp_read_bios_from_vram(rdev);
488 if (r == false) 618 if (r == false)
489 r = radeon_read_bios(rdev); 619 r = radeon_read_bios(rdev);
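[Annotation, not part of the patch] radeon_atrm_get_bios() above pulls the discrete card's ROM through ACPI in fixed 4 KiB pages and stops at the first short read. A minimal sketch of that loop follows; the names fetch_rom(), read_page() and ROM_PAGE are invented stand-ins for the real ATRM call.

/* Illustration only: page-wise ROM fetch, stopping on the first short read. */
#include <stdint.h>

#define ROM_PAGE 4096

static int fetch_rom(uint8_t *bios, int size,
		     int (*read_page)(uint8_t *bios, int offset, int len))
{
	int i, ret;

	for (i = 0; i < size / ROM_PAGE; i++) {
		ret = read_page(bios, i * ROM_PAGE, ROM_PAGE);
		if (ret < ROM_PAGE)
			break;	/* short read (or error): end of image */
	}
	return i * ROM_PAGE;	/* bytes fetched in full pages */
}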
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index d2e243867ac6..7a3daebd732d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1051,7 +1051,7 @@ int radeon_device_init(struct radeon_device *rdev,
1051 if (rdev->flags & RADEON_IS_AGP) 1051 if (rdev->flags & RADEON_IS_AGP)
1052 rdev->need_dma32 = true; 1052 rdev->need_dma32 = true;
1053 if ((rdev->flags & RADEON_IS_PCI) && 1053 if ((rdev->flags & RADEON_IS_PCI) &&
1054 (rdev->family < CHIP_RS400)) 1054 (rdev->family <= CHIP_RS740))
1055 rdev->need_dma32 = true; 1055 rdev->need_dma32 = true;
1056 1056
1057 dma_bits = rdev->need_dma32 ? 32 : 40; 1057 dma_bits = rdev->need_dma32 ? 32 : 40;
@@ -1346,12 +1346,15 @@ retry:
1346 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1346 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1347 radeon_ring_restore(rdev, &rdev->ring[i], 1347 radeon_ring_restore(rdev, &rdev->ring[i],
1348 ring_sizes[i], ring_data[i]); 1348 ring_sizes[i], ring_data[i]);
1349 ring_sizes[i] = 0;
1350 ring_data[i] = NULL;
1349 } 1351 }
1350 1352
1351 r = radeon_ib_ring_tests(rdev); 1353 r = radeon_ib_ring_tests(rdev);
1352 if (r) { 1354 if (r) {
1353 dev_err(rdev->dev, "ib ring test failed (%d).\n", r); 1355 dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
1354 if (saved) { 1356 if (saved) {
1357 saved = false;
1355 radeon_suspend(rdev); 1358 radeon_suspend(rdev);
1356 goto retry; 1359 goto retry;
1357 } 1360 }
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d7269f48d37c..8c593ea82c41 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -62,9 +62,11 @@
62 * 2.18.0 - r600-eg: allow "invalid" DB formats 62 * 2.18.0 - r600-eg: allow "invalid" DB formats
63 * 2.19.0 - r600-eg: MSAA textures 63 * 2.19.0 - r600-eg: MSAA textures
64 * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query 64 * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
65 * 2.21.0 - r600-r700: FMASK and CMASK
66 * 2.22.0 - r600 only: RESOLVE_BOX allowed
65 */ 67 */
66#define KMS_DRIVER_MAJOR 2 68#define KMS_DRIVER_MAJOR 2
67#define KMS_DRIVER_MINOR 20 69#define KMS_DRIVER_MINOR 22
68#define KMS_DRIVER_PATCHLEVEL 0 70#define KMS_DRIVER_PATCHLEVEL 0
69int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 71int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
70int radeon_driver_unload_kms(struct drm_device *dev); 72int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1cb014b571ab..9024e7222839 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -132,6 +132,7 @@ int radeon_bo_create(struct radeon_device *rdev,
132 acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, 132 acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
133 sizeof(struct radeon_bo)); 133 sizeof(struct radeon_bo));
134 134
135retry:
135 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); 136 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
136 if (bo == NULL) 137 if (bo == NULL)
137 return -ENOMEM; 138 return -ENOMEM;
@@ -145,8 +146,6 @@ int radeon_bo_create(struct radeon_device *rdev,
145 bo->surface_reg = -1; 146 bo->surface_reg = -1;
146 INIT_LIST_HEAD(&bo->list); 147 INIT_LIST_HEAD(&bo->list);
147 INIT_LIST_HEAD(&bo->va); 148 INIT_LIST_HEAD(&bo->va);
148
149retry:
150 radeon_ttm_placement_from_domain(bo, domain); 149 radeon_ttm_placement_from_domain(bo, domain);
151 /* Kernel allocation are uninterruptible */ 150 /* Kernel allocation are uninterruptible */
152 down_read(&rdev->pm.mclk_lock); 151 down_read(&rdev->pm.mclk_lock);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index ec79b3750430..43c431a2686d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -706,6 +706,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
706 if (radeon_debugfs_ring_init(rdev, ring)) { 706 if (radeon_debugfs_ring_init(rdev, ring)) {
707 DRM_ERROR("Failed to register debugfs file for rings !\n"); 707 DRM_ERROR("Failed to register debugfs file for rings !\n");
708 } 708 }
709 radeon_ring_lockup_update(ring);
709 return 0; 710 return 0;
710} 711}
711 712
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 5e659b034d9a..20bfbda7b3f1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -744,15 +744,6 @@ r600 0x9400
744 0x00028C38 CB_CLRCMP_DST 744 0x00028C38 CB_CLRCMP_DST
745 0x00028C3C CB_CLRCMP_MSK 745 0x00028C3C CB_CLRCMP_MSK
746 0x00028C34 CB_CLRCMP_SRC 746 0x00028C34 CB_CLRCMP_SRC
747 0x00028100 CB_COLOR0_MASK
748 0x00028104 CB_COLOR1_MASK
749 0x00028108 CB_COLOR2_MASK
750 0x0002810C CB_COLOR3_MASK
751 0x00028110 CB_COLOR4_MASK
752 0x00028114 CB_COLOR5_MASK
753 0x00028118 CB_COLOR6_MASK
754 0x0002811C CB_COLOR7_MASK
755 0x00028808 CB_COLOR_CONTROL
756 0x0002842C CB_FOG_BLUE 747 0x0002842C CB_FOG_BLUE
757 0x00028428 CB_FOG_GREEN 748 0x00028428 CB_FOG_GREEN
758 0x00028424 CB_FOG_RED 749 0x00028424 CB_FOG_RED
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 0b5e096d39a6..56e0bf31d425 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -1,6 +1,7 @@
1 config DRM_UDL 1 config DRM_UDL
2 tristate "DisplayLink" 2 tristate "DisplayLink"
3 depends on DRM && EXPERIMENTAL 3 depends on DRM && EXPERIMENTAL
4 depends on USB_ARCH_HAS_HCD
4 select DRM_USB 5 select DRM_USB
5 select FB_SYS_FILLRECT 6 select FB_SYS_FILLRECT
6 select FB_SYS_COPYAREA 7 select FB_SYS_COPYAREA
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index f5dd89e891de..9159d48d1dfd 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -354,8 +354,7 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
354 354
355static void udl_crtc_disable(struct drm_crtc *crtc) 355static void udl_crtc_disable(struct drm_crtc *crtc)
356{ 356{
357 357 udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
358
359} 358}
360 359
361static void udl_crtc_destroy(struct drm_crtc *crtc) 360static void udl_crtc_destroy(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 6b0078ffa763..c50724bd30f6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1688,15 +1688,19 @@ int vmw_du_page_flip(struct drm_crtc *crtc,
1688 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 1688 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1689 struct drm_framebuffer *old_fb = crtc->fb; 1689 struct drm_framebuffer *old_fb = crtc->fb;
1690 struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb); 1690 struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
1691 struct drm_file *file_priv = event->base.file_priv; 1691 struct drm_file *file_priv ;
1692 struct vmw_fence_obj *fence = NULL; 1692 struct vmw_fence_obj *fence = NULL;
1693 struct drm_clip_rect clips; 1693 struct drm_clip_rect clips;
1694 int ret; 1694 int ret;
1695 1695
1696 if (event == NULL)
1697 return -EINVAL;
1698
1696 /* require ScreenObject support for page flipping */ 1699 /* require ScreenObject support for page flipping */
1697 if (!dev_priv->sou_priv) 1700 if (!dev_priv->sou_priv)
1698 return -ENOSYS; 1701 return -ENOSYS;
1699 1702
1703 file_priv = event->base.file_priv;
1700 if (!vmw_kms_screen_object_flippable(dev_priv, crtc)) 1704 if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
1701 return -EINVAL; 1705 return -EINVAL;
1702 1706
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 5b3c7d135dc9..e25cf31faab2 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -70,27 +70,12 @@ static struct vgasr_priv vgasr_priv = {
70 .clients = LIST_HEAD_INIT(vgasr_priv.clients), 70 .clients = LIST_HEAD_INIT(vgasr_priv.clients),
71}; 71};
72 72
73int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) 73static bool vga_switcheroo_ready(void)
74{
75 mutex_lock(&vgasr_mutex);
76 if (vgasr_priv.handler) {
77 mutex_unlock(&vgasr_mutex);
78 return -EINVAL;
79 }
80
81 vgasr_priv.handler = handler;
82 mutex_unlock(&vgasr_mutex);
83 return 0;
84}
85EXPORT_SYMBOL(vga_switcheroo_register_handler);
86
87void vga_switcheroo_unregister_handler(void)
88{ 74{
89 mutex_lock(&vgasr_mutex); 75 /* we're ready if we get two clients + handler */
90 vgasr_priv.handler = NULL; 76 return !vgasr_priv.active &&
91 mutex_unlock(&vgasr_mutex); 77 vgasr_priv.registered_clients == 2 && vgasr_priv.handler;
92} 78}
93EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
94 79
95static void vga_switcheroo_enable(void) 80static void vga_switcheroo_enable(void)
96{ 81{
@@ -98,7 +83,8 @@ static void vga_switcheroo_enable(void)
98 struct vga_switcheroo_client *client; 83 struct vga_switcheroo_client *client;
99 84
100 /* call the handler to init */ 85 /* call the handler to init */
101 vgasr_priv.handler->init(); 86 if (vgasr_priv.handler->init)
87 vgasr_priv.handler->init();
102 88
103 list_for_each_entry(client, &vgasr_priv.clients, list) { 89 list_for_each_entry(client, &vgasr_priv.clients, list) {
104 if (client->id != -1) 90 if (client->id != -1)
@@ -113,6 +99,37 @@ static void vga_switcheroo_enable(void)
113 vgasr_priv.active = true; 99 vgasr_priv.active = true;
114} 100}
115 101
102int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
103{
104 mutex_lock(&vgasr_mutex);
105 if (vgasr_priv.handler) {
106 mutex_unlock(&vgasr_mutex);
107 return -EINVAL;
108 }
109
110 vgasr_priv.handler = handler;
111 if (vga_switcheroo_ready()) {
112 printk(KERN_INFO "vga_switcheroo: enabled\n");
113 vga_switcheroo_enable();
114 }
115 mutex_unlock(&vgasr_mutex);
116 return 0;
117}
118EXPORT_SYMBOL(vga_switcheroo_register_handler);
119
120void vga_switcheroo_unregister_handler(void)
121{
122 mutex_lock(&vgasr_mutex);
123 vgasr_priv.handler = NULL;
124 if (vgasr_priv.active) {
125 pr_info("vga_switcheroo: disabled\n");
126 vga_switcheroo_debugfs_fini(&vgasr_priv);
127 vgasr_priv.active = false;
128 }
129 mutex_unlock(&vgasr_mutex);
130}
131EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
132
116static int register_client(struct pci_dev *pdev, 133static int register_client(struct pci_dev *pdev,
117 const struct vga_switcheroo_client_ops *ops, 134 const struct vga_switcheroo_client_ops *ops,
118 int id, bool active) 135 int id, bool active)
@@ -134,9 +151,7 @@ static int register_client(struct pci_dev *pdev,
134 if (client_is_vga(client)) 151 if (client_is_vga(client))
135 vgasr_priv.registered_clients++; 152 vgasr_priv.registered_clients++;
136 153
137 /* if we get two clients + handler */ 154 if (vga_switcheroo_ready()) {
138 if (!vgasr_priv.active &&
139 vgasr_priv.registered_clients == 2 && vgasr_priv.handler) {
140 printk(KERN_INFO "vga_switcheroo: enabled\n"); 155 printk(KERN_INFO "vga_switcheroo: enabled\n");
141 vga_switcheroo_enable(); 156 vga_switcheroo_enable();
142 } 157 }
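[Annotation, not part of the patch] The vga_switcheroo hunks above factor the "two clients + handler" condition into vga_switcheroo_ready() and reach the same enable path from both handler and client registration, so switching can start regardless of which side registers last. A condensed sketch of that control flow (locking, debugfs and log messages omitted; try_enable() is an invented name):

/* Condensed control flow after this patch -- illustration only. */
static bool vga_switcheroo_ready(void)
{
	/* ready once both GPU clients and the platform handler are present */
	return !vgasr_priv.active &&
	       vgasr_priv.registered_clients == 2 && vgasr_priv.handler;
}

static void try_enable(void)
{
	if (vga_switcheroo_ready())
		vga_switcheroo_enable();	/* reached from register_handler() and register_client() */
}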
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 60ea284407ce..8bf8a64e5115 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1624,7 +1624,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
1624 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, 1624 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
1625 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, 1625 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
1626 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, 1626 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
1627 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
1628 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) }, 1627 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
1629 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, 1628 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
1630 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, 1629 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 351d1f4593e7..4ee578948723 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = {
34 .matches = { 34 .matches = {
35 DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58") 35 DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58")
36 } 36 }
37 }, {
38 /* Old interface reads the same sensor for fan0 and fan1 */
39 .ident = "Asus M5A78L",
40 .matches = {
41 DMI_MATCH(DMI_BOARD_NAME, "M5A78L")
42 }
37 }, 43 },
38 { } 44 { }
39}; 45};
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index faa16f80db9c..0fa356fe82cc 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -196,7 +196,7 @@ struct tjmax {
196 int tjmax; 196 int tjmax;
197}; 197};
198 198
199static struct tjmax __cpuinitconst tjmax_table[] = { 199static const struct tjmax __cpuinitconst tjmax_table[] = {
200 { "CPU D410", 100000 }, 200 { "CPU D410", 100000 },
201 { "CPU D425", 100000 }, 201 { "CPU D425", 100000 },
202 { "CPU D510", 100000 }, 202 { "CPU D510", 100000 },
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index ab4825205a9d..5b1a6a666441 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -1206,7 +1206,7 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
1206 int err = -ENODEV; 1206 int err = -ENODEV;
1207 u16 val; 1207 u16 val;
1208 1208
1209 static const __initdata char *names[] = { 1209 static __initconst char *const names[] = {
1210 "W83627HF", 1210 "W83627HF",
1211 "W83627THF", 1211 "W83627THF",
1212 "W83697HF", 1212 "W83697HF",
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index aedb94f34bf7..dae3ddfe7619 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -405,6 +405,7 @@ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
405 } 405 }
406 } 406 }
407 } 407 }
408 ret = num;
408abort: 409abort:
409 sret = diolan_i2c_stop(dev); 410 sret = diolan_i2c_stop(dev);
410 if (sret < 0 && ret >= 0) 411 if (sret < 0 && ret >= 0)
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 5e6f1eed4f83..61b00edacb08 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -350,10 +350,6 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
350 350
351 i2c_clk = clk_get_rate(dev->clk); 351 i2c_clk = clk_get_rate(dev->clk);
352 352
353 /* fallback to std. mode if machine has not provided it */
354 if (dev->cfg.clk_freq == 0)
355 dev->cfg.clk_freq = 100000;
356
357 /* 353 /*
358 * The spec says, in case of std. mode the divider is 354 * The spec says, in case of std. mode the divider is
359 * 2 whereas it is 3 for fast and fastplus mode of 355 * 2 whereas it is 3 for fast and fastplus mode of
@@ -911,20 +907,32 @@ static const struct i2c_algorithm nmk_i2c_algo = {
911 .functionality = nmk_i2c_functionality 907 .functionality = nmk_i2c_functionality
912}; 908};
913 909
910static struct nmk_i2c_controller u8500_i2c = {
911 /*
912 * Slave data setup time; 250ns, 100ns, and 10ns, which
913 * is 14, 6 and 2 respectively for a 48Mhz i2c clock.
914 */
915 .slsu = 0xe,
916 .tft = 1, /* Tx FIFO threshold */
917 .rft = 8, /* Rx FIFO threshold */
918 .clk_freq = 400000, /* fast mode operation */
919 .timeout = 200, /* Slave response timeout(ms) */
920 .sm = I2C_FREQ_MODE_FAST,
921};
922
914static atomic_t adapter_id = ATOMIC_INIT(0); 923static atomic_t adapter_id = ATOMIC_INIT(0);
915 924
916static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) 925static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
917{ 926{
918 int ret = 0; 927 int ret = 0;
919 struct nmk_i2c_controller *pdata = 928 struct nmk_i2c_controller *pdata = adev->dev.platform_data;
920 adev->dev.platform_data;
921 struct nmk_i2c_dev *dev; 929 struct nmk_i2c_dev *dev;
922 struct i2c_adapter *adap; 930 struct i2c_adapter *adap;
923 931
924 if (!pdata) { 932 if (!pdata)
925 dev_warn(&adev->dev, "no platform data\n"); 933 /* No i2c configuration found, using the default. */
926 return -ENODEV; 934 pdata = &u8500_i2c;
927 } 935
928 dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL); 936 dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL);
929 if (!dev) { 937 if (!dev) {
930 dev_err(&adev->dev, "cannot allocate memory\n"); 938 dev_err(&adev->dev, "cannot allocate memory\n");
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 6849635b268a..5d19a49803c1 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -584,7 +584,7 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
584 584
585 r = pm_runtime_get_sync(dev->dev); 585 r = pm_runtime_get_sync(dev->dev);
586 if (IS_ERR_VALUE(r)) 586 if (IS_ERR_VALUE(r))
587 return r; 587 goto out;
588 588
589 r = omap_i2c_wait_for_bb(dev); 589 r = omap_i2c_wait_for_bb(dev);
590 if (r < 0) 590 if (r < 0)
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 66eb53fac202..9a08c57bc936 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -712,7 +712,7 @@ static int __devexit tegra_i2c_remove(struct platform_device *pdev)
712 return 0; 712 return 0;
713} 713}
714 714
715#ifdef CONFIG_PM 715#ifdef CONFIG_PM_SLEEP
716static int tegra_i2c_suspend(struct device *dev) 716static int tegra_i2c_suspend(struct device *dev)
717{ 717{
718 struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); 718 struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 92406097efeb..8d1e32d7cd97 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -4,7 +4,7 @@
4 4
5int generic_ide_suspend(struct device *dev, pm_message_t mesg) 5int generic_ide_suspend(struct device *dev, pm_message_t mesg)
6{ 6{
7 ide_drive_t *drive = dev_get_drvdata(dev); 7 ide_drive_t *drive = to_ide_device(dev);
8 ide_drive_t *pair = ide_get_pair_dev(drive); 8 ide_drive_t *pair = ide_get_pair_dev(drive);
9 ide_hwif_t *hwif = drive->hwif; 9 ide_hwif_t *hwif = drive->hwif;
10 struct request *rq; 10 struct request *rq;
@@ -40,7 +40,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
40 40
41int generic_ide_resume(struct device *dev) 41int generic_ide_resume(struct device *dev)
42{ 42{
43 ide_drive_t *drive = dev_get_drvdata(dev); 43 ide_drive_t *drive = to_ide_device(dev);
44 ide_drive_t *pair = ide_get_pair_dev(drive); 44 ide_drive_t *pair = ide_get_pair_dev(drive);
45 ide_hwif_t *hwif = drive->hwif; 45 ide_hwif_t *hwif = drive->hwif;
46 struct request *rq; 46 struct request *rq;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 0a2ea317120a..18a89b760aaa 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1111,7 +1111,7 @@ static void print_iommu_info(void)
1111 1111
1112 if (iommu->cap & (1 << IOMMU_CAP_EFR)) { 1112 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
1113 pr_info("AMD-Vi: Extended features: "); 1113 pr_info("AMD-Vi: Extended features: ");
1114 for (i = 0; ARRAY_SIZE(feat_str); ++i) { 1114 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
1115 if (iommu_feature(iommu, (1ULL << i))) 1115 if (iommu_feature(iommu, (1ULL << i)))
1116 pr_cont(" %s", feat_str[i]); 1116 pr_cont(" %s", feat_str[i]);
1117 } 1117 }
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index e0b18f3ae9a8..af8904de1d44 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -736,6 +736,7 @@ int __init parse_ioapics_under_ir(void)
736{ 736{
737 struct dmar_drhd_unit *drhd; 737 struct dmar_drhd_unit *drhd;
738 int ir_supported = 0; 738 int ir_supported = 0;
739 int ioapic_idx;
739 740
740 for_each_drhd_unit(drhd) { 741 for_each_drhd_unit(drhd) {
741 struct intel_iommu *iommu = drhd->iommu; 742 struct intel_iommu *iommu = drhd->iommu;
@@ -748,13 +749,20 @@ int __init parse_ioapics_under_ir(void)
748 } 749 }
749 } 750 }
750 751
751 if (ir_supported && ir_ioapic_num != nr_ioapics) { 752 if (!ir_supported)
752 printk(KERN_WARNING 753 return 0;
753 "Not all IO-APIC's listed under remapping hardware\n"); 754
754 return -1; 755 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
756 int ioapic_id = mpc_ioapic_id(ioapic_idx);
757 if (!map_ioapic_to_ir(ioapic_id)) {
758 pr_err(FW_BUG "ioapic %d has no mapping iommu, "
759 "interrupt remapping will be disabled\n",
760 ioapic_id);
761 return -1;
762 }
755 } 763 }
756 764
757 return ir_supported; 765 return 1;
758} 766}
759 767
760int __init ir_dev_scope_init(void) 768int __init ir_dev_scope_init(void)
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 664e460f247b..aac622200e99 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -481,7 +481,7 @@ static int smsusb_resume(struct usb_interface *intf)
481 return 0; 481 return 0;
482} 482}
483 483
484static const struct usb_device_id smsusb_id_table[] __devinitconst = { 484static const struct usb_device_id smsusb_id_table[] = {
485 { USB_DEVICE(0x187f, 0x0010), 485 { USB_DEVICE(0x187f, 0x0010),
486 .driver_info = SMS1XXX_BOARD_SIANO_STELLAR }, 486 .driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
487 { USB_DEVICE(0x187f, 0x0100), 487 { USB_DEVICE(0x187f, 0x0100),
diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
index d0b6bb507634..72ded29728bb 100644
--- a/drivers/media/radio/radio-shark.c
+++ b/drivers/media/radio/radio-shark.c
@@ -35,6 +35,11 @@
35#include <media/v4l2-device.h> 35#include <media/v4l2-device.h>
36#include <sound/tea575x-tuner.h> 36#include <sound/tea575x-tuner.h>
37 37
38#if defined(CONFIG_LEDS_CLASS) || \
39 (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK_MODULE))
40#define SHARK_USE_LEDS 1
41#endif
42
38/* 43/*
39 * Version Information 44 * Version Information
40 */ 45 */
@@ -56,44 +61,18 @@ MODULE_LICENSE("GPL");
56 61
57enum { BLUE_LED, BLUE_PULSE_LED, RED_LED, NO_LEDS }; 62enum { BLUE_LED, BLUE_PULSE_LED, RED_LED, NO_LEDS };
58 63
59static void shark_led_set_blue(struct led_classdev *led_cdev,
60 enum led_brightness value);
61static void shark_led_set_blue_pulse(struct led_classdev *led_cdev,
62 enum led_brightness value);
63static void shark_led_set_red(struct led_classdev *led_cdev,
64 enum led_brightness value);
65
66static const struct led_classdev shark_led_templates[NO_LEDS] = {
67 [BLUE_LED] = {
68 .name = "%s:blue:",
69 .brightness = LED_OFF,
70 .max_brightness = 127,
71 .brightness_set = shark_led_set_blue,
72 },
73 [BLUE_PULSE_LED] = {
74 .name = "%s:blue-pulse:",
75 .brightness = LED_OFF,
76 .max_brightness = 255,
77 .brightness_set = shark_led_set_blue_pulse,
78 },
79 [RED_LED] = {
80 .name = "%s:red:",
81 .brightness = LED_OFF,
82 .max_brightness = 1,
83 .brightness_set = shark_led_set_red,
84 },
85};
86
87struct shark_device { 64struct shark_device {
88 struct usb_device *usbdev; 65 struct usb_device *usbdev;
89 struct v4l2_device v4l2_dev; 66 struct v4l2_device v4l2_dev;
90 struct snd_tea575x tea; 67 struct snd_tea575x tea;
91 68
69#ifdef SHARK_USE_LEDS
92 struct work_struct led_work; 70 struct work_struct led_work;
93 struct led_classdev leds[NO_LEDS]; 71 struct led_classdev leds[NO_LEDS];
94 char led_names[NO_LEDS][32]; 72 char led_names[NO_LEDS][32];
95 atomic_t brightness[NO_LEDS]; 73 atomic_t brightness[NO_LEDS];
96 unsigned long brightness_new; 74 unsigned long brightness_new;
75#endif
97 76
98 u8 *transfer_buffer; 77 u8 *transfer_buffer;
99 u32 last_val; 78 u32 last_val;
@@ -175,20 +154,13 @@ static struct snd_tea575x_ops shark_tea_ops = {
175 .read_val = shark_read_val, 154 .read_val = shark_read_val,
176}; 155};
177 156
157#ifdef SHARK_USE_LEDS
178static void shark_led_work(struct work_struct *work) 158static void shark_led_work(struct work_struct *work)
179{ 159{
180 struct shark_device *shark = 160 struct shark_device *shark =
181 container_of(work, struct shark_device, led_work); 161 container_of(work, struct shark_device, led_work);
182 int i, res, brightness, actual_len; 162 int i, res, brightness, actual_len;
183 163
184 /*
185 * We use the v4l2_dev lock and registered bit to ensure the device
186 * does not get unplugged and unreffed while we're running.
187 */
188 mutex_lock(&shark->tea.mutex);
189 if (!video_is_registered(&shark->tea.vd))
190 goto leave;
191
192 for (i = 0; i < 3; i++) { 164 for (i = 0; i < 3; i++) {
193 if (!test_and_clear_bit(i, &shark->brightness_new)) 165 if (!test_and_clear_bit(i, &shark->brightness_new))
194 continue; 166 continue;
@@ -208,8 +180,6 @@ static void shark_led_work(struct work_struct *work)
208 v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n", 180 v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n",
209 shark->led_names[i], res); 181 shark->led_names[i], res);
210 } 182 }
211leave:
212 mutex_unlock(&shark->tea.mutex);
213} 183}
214 184
215static void shark_led_set_blue(struct led_classdev *led_cdev, 185static void shark_led_set_blue(struct led_classdev *led_cdev,
@@ -245,19 +215,78 @@ static void shark_led_set_red(struct led_classdev *led_cdev,
245 schedule_work(&shark->led_work); 215 schedule_work(&shark->led_work);
246} 216}
247 217
218static const struct led_classdev shark_led_templates[NO_LEDS] = {
219 [BLUE_LED] = {
220 .name = "%s:blue:",
221 .brightness = LED_OFF,
222 .max_brightness = 127,
223 .brightness_set = shark_led_set_blue,
224 },
225 [BLUE_PULSE_LED] = {
226 .name = "%s:blue-pulse:",
227 .brightness = LED_OFF,
228 .max_brightness = 255,
229 .brightness_set = shark_led_set_blue_pulse,
230 },
231 [RED_LED] = {
232 .name = "%s:red:",
233 .brightness = LED_OFF,
234 .max_brightness = 1,
235 .brightness_set = shark_led_set_red,
236 },
237};
238
239static int shark_register_leds(struct shark_device *shark, struct device *dev)
240{
241 int i, retval;
242
243 INIT_WORK(&shark->led_work, shark_led_work);
244 for (i = 0; i < NO_LEDS; i++) {
245 shark->leds[i] = shark_led_templates[i];
246 snprintf(shark->led_names[i], sizeof(shark->led_names[0]),
247 shark->leds[i].name, shark->v4l2_dev.name);
248 shark->leds[i].name = shark->led_names[i];
249 retval = led_classdev_register(dev, &shark->leds[i]);
250 if (retval) {
251 v4l2_err(&shark->v4l2_dev,
252 "couldn't register led: %s\n",
253 shark->led_names[i]);
254 return retval;
255 }
256 }
257 return 0;
258}
259
260static void shark_unregister_leds(struct shark_device *shark)
261{
262 int i;
263
264 for (i = 0; i < NO_LEDS; i++)
265 led_classdev_unregister(&shark->leds[i]);
266
267 cancel_work_sync(&shark->led_work);
268}
269#else
270static int shark_register_leds(struct shark_device *shark, struct device *dev)
271{
272 v4l2_warn(&shark->v4l2_dev,
273 "CONFIG_LED_CLASS not enabled, LED support disabled\n");
274 return 0;
275}
276static inline void shark_unregister_leds(struct shark_device *shark) { }
277#endif
278
248static void usb_shark_disconnect(struct usb_interface *intf) 279static void usb_shark_disconnect(struct usb_interface *intf)
249{ 280{
250 struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); 281 struct v4l2_device *v4l2_dev = usb_get_intfdata(intf);
251 struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); 282 struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
252 int i;
253 283
254 mutex_lock(&shark->tea.mutex); 284 mutex_lock(&shark->tea.mutex);
255 v4l2_device_disconnect(&shark->v4l2_dev); 285 v4l2_device_disconnect(&shark->v4l2_dev);
256 snd_tea575x_exit(&shark->tea); 286 snd_tea575x_exit(&shark->tea);
257 mutex_unlock(&shark->tea.mutex); 287 mutex_unlock(&shark->tea.mutex);
258 288
259 for (i = 0; i < NO_LEDS; i++) 289 shark_unregister_leds(shark);
260 led_classdev_unregister(&shark->leds[i]);
261 290
262 v4l2_device_put(&shark->v4l2_dev); 291 v4l2_device_put(&shark->v4l2_dev);
263} 292}
@@ -266,7 +295,6 @@ static void usb_shark_release(struct v4l2_device *v4l2_dev)
266{ 295{
267 struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); 296 struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
268 297
269 cancel_work_sync(&shark->led_work);
270 v4l2_device_unregister(&shark->v4l2_dev); 298 v4l2_device_unregister(&shark->v4l2_dev);
271 kfree(shark->transfer_buffer); 299 kfree(shark->transfer_buffer);
272 kfree(shark); 300 kfree(shark);
@@ -276,7 +304,7 @@ static int usb_shark_probe(struct usb_interface *intf,
276 const struct usb_device_id *id) 304 const struct usb_device_id *id)
277{ 305{
278 struct shark_device *shark; 306 struct shark_device *shark;
279 int i, retval = -ENOMEM; 307 int retval = -ENOMEM;
280 308
281 shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL); 309 shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
282 if (!shark) 310 if (!shark)
@@ -286,17 +314,13 @@ static int usb_shark_probe(struct usb_interface *intf,
286 if (!shark->transfer_buffer) 314 if (!shark->transfer_buffer)
287 goto err_alloc_buffer; 315 goto err_alloc_buffer;
288 316
289 /* 317 v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance);
290 * Work around a bug in usbhid/hid-core.c, where it leaves a dangling 318
291 * pointer in intfdata causing v4l2-device.c to not set it. Which 319 retval = shark_register_leds(shark, &intf->dev);
292 * results in usb_shark_disconnect() referencing the dangling pointer 320 if (retval)
293 * 321 goto err_reg_leds;
294 * REMOVE (as soon as the above bug is fixed, patch submitted)
295 */
296 usb_set_intfdata(intf, NULL);
297 322
298 shark->v4l2_dev.release = usb_shark_release; 323 shark->v4l2_dev.release = usb_shark_release;
299 v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance);
300 retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev); 324 retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev);
301 if (retval) { 325 if (retval) {
302 v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n"); 326 v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n");
@@ -320,32 +344,13 @@ static int usb_shark_probe(struct usb_interface *intf,
320 goto err_init_tea; 344 goto err_init_tea;
321 } 345 }
322 346
323 INIT_WORK(&shark->led_work, shark_led_work);
324 for (i = 0; i < NO_LEDS; i++) {
325 shark->leds[i] = shark_led_templates[i];
326 snprintf(shark->led_names[i], sizeof(shark->led_names[0]),
327 shark->leds[i].name, shark->v4l2_dev.name);
328 shark->leds[i].name = shark->led_names[i];
329 /*
330 * We don't fail the probe if we fail to register the leds,
331 * because once we've called snd_tea575x_init, the /dev/radio0
332 * node may be opened from userspace holding a reference to us!
333 *
334 * Note we cannot register the leds first instead as
335 * shark_led_work depends on the v4l2 mutex and registered bit.
336 */
337 retval = led_classdev_register(&intf->dev, &shark->leds[i]);
338 if (retval)
339 v4l2_err(&shark->v4l2_dev,
340 "couldn't register led: %s\n",
341 shark->led_names[i]);
342 }
343
344 return 0; 347 return 0;
345 348
346err_init_tea: 349err_init_tea:
347 v4l2_device_unregister(&shark->v4l2_dev); 350 v4l2_device_unregister(&shark->v4l2_dev);
348err_reg_dev: 351err_reg_dev:
352 shark_unregister_leds(shark);
353err_reg_leds:
349 kfree(shark->transfer_buffer); 354 kfree(shark->transfer_buffer);
350err_alloc_buffer: 355err_alloc_buffer:
351 kfree(shark); 356 kfree(shark);
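The radio-shark hunks above compile the LED handling only when the LED class code is actually reachable from this driver (LED class built-in, or both the LED class and the driver built as modules), and fall back to stub register/unregister helpers otherwise so the probe and disconnect paths stay free of #ifdefs. A minimal sketch of the same compile-out pattern, with hypothetical mydrv names standing in for the real driver symbols:

#if defined(CONFIG_LEDS_CLASS) || \
    (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_MYDRV_MODULE))
#define MYDRV_USE_LEDS 1
#endif

struct mydrv_device;	/* hypothetical driver state */

#ifdef MYDRV_USE_LEDS
static int mydrv_register_leds(struct mydrv_device *mydrv, struct device *dev)
{
	/* INIT_WORK() on the LED work item, then led_classdev_register()
	 * for each LED; return the error if one of them fails. */
	return 0;
}

static void mydrv_unregister_leds(struct mydrv_device *mydrv)
{
	/* led_classdev_unregister() for each LED, then cancel_work_sync() */
}
#else
static int mydrv_register_leds(struct mydrv_device *mydrv, struct device *dev)
{
	return 0;	/* LED support compiled out; the real driver warns here */
}

static inline void mydrv_unregister_leds(struct mydrv_device *mydrv) { }
#endif

Callers invoke the register/unregister helpers unconditionally, which is exactly how usb_shark_probe() and usb_shark_disconnect() use them in the hunks above.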
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
index b9575de3e7e8..7b4efdfaae28 100644
--- a/drivers/media/radio/radio-shark2.c
+++ b/drivers/media/radio/radio-shark2.c
@@ -35,6 +35,11 @@
35#include <media/v4l2-device.h> 35#include <media/v4l2-device.h>
36#include "radio-tea5777.h" 36#include "radio-tea5777.h"
37 37
38#if defined(CONFIG_LEDS_CLASS) || \
39 (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK2_MODULE))
40#define SHARK_USE_LEDS 1
41#endif
42
38MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); 43MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
39MODULE_DESCRIPTION("Griffin radioSHARK2, USB radio receiver driver"); 44MODULE_DESCRIPTION("Griffin radioSHARK2, USB radio receiver driver");
40MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
@@ -43,7 +48,6 @@ static int debug;
43module_param(debug, int, 0); 48module_param(debug, int, 0);
44MODULE_PARM_DESC(debug, "Debug level (0-1)"); 49MODULE_PARM_DESC(debug, "Debug level (0-1)");
45 50
46
47#define SHARK_IN_EP 0x83 51#define SHARK_IN_EP 0x83
48#define SHARK_OUT_EP 0x05 52#define SHARK_OUT_EP 0x05
49 53
@@ -54,36 +58,18 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
54 58
55enum { BLUE_LED, RED_LED, NO_LEDS }; 59enum { BLUE_LED, RED_LED, NO_LEDS };
56 60
57static void shark_led_set_blue(struct led_classdev *led_cdev,
58 enum led_brightness value);
59static void shark_led_set_red(struct led_classdev *led_cdev,
60 enum led_brightness value);
61
62static const struct led_classdev shark_led_templates[NO_LEDS] = {
63 [BLUE_LED] = {
64 .name = "%s:blue:",
65 .brightness = LED_OFF,
66 .max_brightness = 127,
67 .brightness_set = shark_led_set_blue,
68 },
69 [RED_LED] = {
70 .name = "%s:red:",
71 .brightness = LED_OFF,
72 .max_brightness = 1,
73 .brightness_set = shark_led_set_red,
74 },
75};
76
77struct shark_device { 61struct shark_device {
78 struct usb_device *usbdev; 62 struct usb_device *usbdev;
79 struct v4l2_device v4l2_dev; 63 struct v4l2_device v4l2_dev;
80 struct radio_tea5777 tea; 64 struct radio_tea5777 tea;
81 65
66#ifdef SHARK_USE_LEDS
82 struct work_struct led_work; 67 struct work_struct led_work;
83 struct led_classdev leds[NO_LEDS]; 68 struct led_classdev leds[NO_LEDS];
84 char led_names[NO_LEDS][32]; 69 char led_names[NO_LEDS][32];
85 atomic_t brightness[NO_LEDS]; 70 atomic_t brightness[NO_LEDS];
86 unsigned long brightness_new; 71 unsigned long brightness_new;
72#endif
87 73
88 u8 *transfer_buffer; 74 u8 *transfer_buffer;
89}; 75};
@@ -161,18 +147,12 @@ static struct radio_tea5777_ops shark_tea_ops = {
161 .read_reg = shark_read_reg, 147 .read_reg = shark_read_reg,
162}; 148};
163 149
150#ifdef SHARK_USE_LEDS
164static void shark_led_work(struct work_struct *work) 151static void shark_led_work(struct work_struct *work)
165{ 152{
166 struct shark_device *shark = 153 struct shark_device *shark =
167 container_of(work, struct shark_device, led_work); 154 container_of(work, struct shark_device, led_work);
168 int i, res, brightness, actual_len; 155 int i, res, brightness, actual_len;
169 /*
170 * We use the v4l2_dev lock and registered bit to ensure the device
171 * does not get unplugged and unreffed while we're running.
172 */
173 mutex_lock(&shark->tea.mutex);
174 if (!video_is_registered(&shark->tea.vd))
175 goto leave;
176 156
177 for (i = 0; i < 2; i++) { 157 for (i = 0; i < 2; i++) {
178 if (!test_and_clear_bit(i, &shark->brightness_new)) 158 if (!test_and_clear_bit(i, &shark->brightness_new))
@@ -191,8 +171,6 @@ static void shark_led_work(struct work_struct *work)
191 v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n", 171 v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n",
192 shark->led_names[i], res); 172 shark->led_names[i], res);
193 } 173 }
194leave:
195 mutex_unlock(&shark->tea.mutex);
196} 174}
197 175
198static void shark_led_set_blue(struct led_classdev *led_cdev, 176static void shark_led_set_blue(struct led_classdev *led_cdev,
@@ -217,19 +195,72 @@ static void shark_led_set_red(struct led_classdev *led_cdev,
217 schedule_work(&shark->led_work); 195 schedule_work(&shark->led_work);
218} 196}
219 197
198static const struct led_classdev shark_led_templates[NO_LEDS] = {
199 [BLUE_LED] = {
200 .name = "%s:blue:",
201 .brightness = LED_OFF,
202 .max_brightness = 127,
203 .brightness_set = shark_led_set_blue,
204 },
205 [RED_LED] = {
206 .name = "%s:red:",
207 .brightness = LED_OFF,
208 .max_brightness = 1,
209 .brightness_set = shark_led_set_red,
210 },
211};
212
213static int shark_register_leds(struct shark_device *shark, struct device *dev)
214{
215 int i, retval;
216
217 INIT_WORK(&shark->led_work, shark_led_work);
218 for (i = 0; i < NO_LEDS; i++) {
219 shark->leds[i] = shark_led_templates[i];
220 snprintf(shark->led_names[i], sizeof(shark->led_names[0]),
221 shark->leds[i].name, shark->v4l2_dev.name);
222 shark->leds[i].name = shark->led_names[i];
223 retval = led_classdev_register(dev, &shark->leds[i]);
224 if (retval) {
225 v4l2_err(&shark->v4l2_dev,
226 "couldn't register led: %s\n",
227 shark->led_names[i]);
228 return retval;
229 }
230 }
231 return 0;
232}
233
234static void shark_unregister_leds(struct shark_device *shark)
235{
236 int i;
237
238 for (i = 0; i < NO_LEDS; i++)
239 led_classdev_unregister(&shark->leds[i]);
240
241 cancel_work_sync(&shark->led_work);
242}
243#else
244static int shark_register_leds(struct shark_device *shark, struct device *dev)
245{
246 v4l2_warn(&shark->v4l2_dev,
247 "CONFIG_LED_CLASS not enabled, LED support disabled\n");
248 return 0;
249}
250static inline void shark_unregister_leds(struct shark_device *shark) { }
251#endif
252
220static void usb_shark_disconnect(struct usb_interface *intf) 253static void usb_shark_disconnect(struct usb_interface *intf)
221{ 254{
222 struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); 255 struct v4l2_device *v4l2_dev = usb_get_intfdata(intf);
223 struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); 256 struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
224 int i;
225 257
226 mutex_lock(&shark->tea.mutex); 258 mutex_lock(&shark->tea.mutex);
227 v4l2_device_disconnect(&shark->v4l2_dev); 259 v4l2_device_disconnect(&shark->v4l2_dev);
228 radio_tea5777_exit(&shark->tea); 260 radio_tea5777_exit(&shark->tea);
229 mutex_unlock(&shark->tea.mutex); 261 mutex_unlock(&shark->tea.mutex);
230 262
231 for (i = 0; i < NO_LEDS; i++) 263 shark_unregister_leds(shark);
232 led_classdev_unregister(&shark->leds[i]);
233 264
234 v4l2_device_put(&shark->v4l2_dev); 265 v4l2_device_put(&shark->v4l2_dev);
235} 266}
@@ -238,7 +269,6 @@ static void usb_shark_release(struct v4l2_device *v4l2_dev)
238{ 269{
239 struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); 270 struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
240 271
241 cancel_work_sync(&shark->led_work);
242 v4l2_device_unregister(&shark->v4l2_dev); 272 v4l2_device_unregister(&shark->v4l2_dev);
243 kfree(shark->transfer_buffer); 273 kfree(shark->transfer_buffer);
244 kfree(shark); 274 kfree(shark);
@@ -248,7 +278,7 @@ static int usb_shark_probe(struct usb_interface *intf,
248 const struct usb_device_id *id) 278 const struct usb_device_id *id)
249{ 279{
250 struct shark_device *shark; 280 struct shark_device *shark;
251 int i, retval = -ENOMEM; 281 int retval = -ENOMEM;
252 282
253 shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL); 283 shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
254 if (!shark) 284 if (!shark)
@@ -258,17 +288,13 @@ static int usb_shark_probe(struct usb_interface *intf,
258 if (!shark->transfer_buffer) 288 if (!shark->transfer_buffer)
259 goto err_alloc_buffer; 289 goto err_alloc_buffer;
260 290
261 /* 291 v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance);
262 * Work around a bug in usbhid/hid-core.c, where it leaves a dangling 292
263 * pointer in intfdata causing v4l2-device.c to not set it. Which 293 retval = shark_register_leds(shark, &intf->dev);
264 * results in usb_shark_disconnect() referencing the dangling pointer 294 if (retval)
265 * 295 goto err_reg_leds;
266 * REMOVE (as soon as the above bug is fixed, patch submitted)
267 */
268 usb_set_intfdata(intf, NULL);
269 296
270 shark->v4l2_dev.release = usb_shark_release; 297 shark->v4l2_dev.release = usb_shark_release;
271 v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance);
272 retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev); 298 retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev);
273 if (retval) { 299 if (retval) {
274 v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n"); 300 v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n");
@@ -292,32 +318,13 @@ static int usb_shark_probe(struct usb_interface *intf,
292 goto err_init_tea; 318 goto err_init_tea;
293 } 319 }
294 320
295 INIT_WORK(&shark->led_work, shark_led_work);
296 for (i = 0; i < NO_LEDS; i++) {
297 shark->leds[i] = shark_led_templates[i];
298 snprintf(shark->led_names[i], sizeof(shark->led_names[0]),
299 shark->leds[i].name, shark->v4l2_dev.name);
300 shark->leds[i].name = shark->led_names[i];
301 /*
302 * We don't fail the probe if we fail to register the leds,
303 * because once we've called radio_tea5777_init, the /dev/radio0
304 * node may be opened from userspace holding a reference to us!
305 *
306 * Note we cannot register the leds first instead as
307 * shark_led_work depends on the v4l2 mutex and registered bit.
308 */
309 retval = led_classdev_register(&intf->dev, &shark->leds[i]);
310 if (retval)
311 v4l2_err(&shark->v4l2_dev,
312 "couldn't register led: %s\n",
313 shark->led_names[i]);
314 }
315
316 return 0; 321 return 0;
317 322
318err_init_tea: 323err_init_tea:
319 v4l2_device_unregister(&shark->v4l2_dev); 324 v4l2_device_unregister(&shark->v4l2_dev);
320err_reg_dev: 325err_reg_dev:
326 shark_unregister_leds(shark);
327err_reg_leds:
321 kfree(shark->transfer_buffer); 328 kfree(shark->transfer_buffer);
322err_alloc_buffer: 329err_alloc_buffer:
323 kfree(shark); 330 kfree(shark);
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 9e38132afec6..9bb65e170d99 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -151,6 +151,7 @@ static const struct v4l2_frequency_band bands[] = {
151 .index = 0, 151 .index = 0,
152 .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | 152 .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
153 V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | 153 V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
154 V4L2_TUNER_CAP_FREQ_BANDS |
154 V4L2_TUNER_CAP_HWSEEK_BOUNDED | 155 V4L2_TUNER_CAP_HWSEEK_BOUNDED |
155 V4L2_TUNER_CAP_HWSEEK_WRAP, 156 V4L2_TUNER_CAP_HWSEEK_WRAP,
156 .rangelow = 87500 * 16, 157 .rangelow = 87500 * 16,
@@ -162,6 +163,7 @@ static const struct v4l2_frequency_band bands[] = {
162 .index = 1, 163 .index = 1,
163 .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | 164 .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
164 V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | 165 V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
166 V4L2_TUNER_CAP_FREQ_BANDS |
165 V4L2_TUNER_CAP_HWSEEK_BOUNDED | 167 V4L2_TUNER_CAP_HWSEEK_BOUNDED |
166 V4L2_TUNER_CAP_HWSEEK_WRAP, 168 V4L2_TUNER_CAP_HWSEEK_WRAP,
167 .rangelow = 76000 * 16, 169 .rangelow = 76000 * 16,
@@ -173,6 +175,7 @@ static const struct v4l2_frequency_band bands[] = {
173 .index = 2, 175 .index = 2,
174 .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | 176 .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
175 V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | 177 V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
178 V4L2_TUNER_CAP_FREQ_BANDS |
176 V4L2_TUNER_CAP_HWSEEK_BOUNDED | 179 V4L2_TUNER_CAP_HWSEEK_BOUNDED |
177 V4L2_TUNER_CAP_HWSEEK_WRAP, 180 V4L2_TUNER_CAP_HWSEEK_WRAP,
178 .rangelow = 76000 * 16, 181 .rangelow = 76000 * 16,
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 643a6ff7c5d0..f867f04cccc9 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -225,8 +225,9 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
225{ 225{
226 strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver)); 226 strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver));
227 strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); 227 strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
228 capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | 228 capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE |
229 V4L2_CAP_TUNER | V4L2_CAP_RADIO; 229 V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
230 capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;
230 231
231 return 0; 232 return 0;
232} 233}
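The querycap change above (and the matching radio-si470x-usb.c hunk below) follows the V4L2 rule that device_caps describes only the node being queried, while capabilities holds the union for the whole device plus V4L2_CAP_DEVICE_CAPS to signal that device_caps is valid. A hedged sketch of the idiom for a hypothetical radio-only driver:

#include <linux/videodev2.h>

static int mydrv_vidioc_querycap(struct file *file, void *priv,
				 struct v4l2_capability *cap)
{
	strlcpy(cap->driver, "mydrv", sizeof(cap->driver));
	strlcpy(cap->card, "Example FM receiver", sizeof(cap->card));
	cap->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
			   V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	return 0;
}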
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 146be4263ea1..be076f7181e7 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -531,7 +531,7 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
531 strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); 531 strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
532 usb_make_path(radio->usbdev, capability->bus_info, 532 usb_make_path(radio->usbdev, capability->bus_info,
533 sizeof(capability->bus_info)); 533 sizeof(capability->bus_info));
534 capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | 534 capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE |
535 V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE; 535 V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
536 capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS; 536 capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;
537 return 0; 537 return 0;
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 5180390be7ab..8be57634ba60 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -261,6 +261,7 @@ config IR_WINBOND_CIR
261 261
262config IR_IGUANA 262config IR_IGUANA
263 tristate "IguanaWorks USB IR Transceiver" 263 tristate "IguanaWorks USB IR Transceiver"
264 depends on USB_ARCH_HAS_HCD
264 depends on RC_CORE 265 depends on RC_CORE
265 select USB 266 select USB
266 ---help--- 267 ---help---
diff --git a/drivers/media/video/gspca/jl2005bcd.c b/drivers/media/video/gspca/jl2005bcd.c
index cf9d9fca5b84..234777116e5f 100644
--- a/drivers/media/video/gspca/jl2005bcd.c
+++ b/drivers/media/video/gspca/jl2005bcd.c
@@ -512,7 +512,7 @@ static const struct sd_desc sd_desc = {
512}; 512};
513 513
514/* -- module initialisation -- */ 514/* -- module initialisation -- */
515static const __devinitdata struct usb_device_id device_table[] = { 515static const struct usb_device_id device_table[] = {
516 {USB_DEVICE(0x0979, 0x0227)}, 516 {USB_DEVICE(0x0979, 0x0227)},
517 {} 517 {}
518}; 518};
diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
index 969bb5a4cd93..bab01c86c315 100644
--- a/drivers/media/video/gspca/spca506.c
+++ b/drivers/media/video/gspca/spca506.c
@@ -579,7 +579,7 @@ static const struct sd_desc sd_desc = {
579}; 579};
580 580
581/* -- module initialisation -- */ 581/* -- module initialisation -- */
582static const struct usb_device_id device_table[] __devinitconst = { 582static const struct usb_device_id device_table[] = {
583 {USB_DEVICE(0x06e1, 0xa190)}, 583 {USB_DEVICE(0x06e1, 0xa190)},
584/*fixme: may be IntelPCCameraPro BRIDGE_SPCA505 584/*fixme: may be IntelPCCameraPro BRIDGE_SPCA505
585 {USB_DEVICE(0x0733, 0x0430)}, */ 585 {USB_DEVICE(0x0733, 0x0430)}, */
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index 7efe9ad7acc7..0b91a5cd38eb 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -431,7 +431,7 @@ static int vidioc_querycap(struct file *file, void *priv,
431 strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1); 431 strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
432 strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1); 432 strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
433 strlcpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->bus_info)); 433 strlcpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->bus_info));
434 cap->capabilities = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; 434 cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
435 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; 435 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
436 return 0; 436 return 0;
437} 437}
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index d2e6f82ecfac..560a65aa7038 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -403,7 +403,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
403 403
404 dev_dbg(pcdev->icd->parent, "Activate device\n"); 404 dev_dbg(pcdev->icd->parent, "Activate device\n");
405 405
406 clk_enable(pcdev->clk); 406 clk_prepare_enable(pcdev->clk);
407 407
408 /* enable CSI before doing anything else */ 408 /* enable CSI before doing anything else */
409 __raw_writel(csicr1, pcdev->base + CSICR1); 409 __raw_writel(csicr1, pcdev->base + CSICR1);
@@ -422,7 +422,7 @@ static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
422 /* Disable all CSI interface */ 422 /* Disable all CSI interface */
423 __raw_writel(0x00, pcdev->base + CSICR1); 423 __raw_writel(0x00, pcdev->base + CSICR1);
424 424
425 clk_disable(pcdev->clk); 425 clk_disable_unprepare(pcdev->clk);
426} 426}
427 427
428/* 428/*
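The mx1_camera hunks above (and the mx2_camera ones that follow) switch from bare clk_enable()/clk_disable() to the common clock framework pairs, which prepare the clock before enabling it and unprepare it after disabling. A minimal sketch, assuming the clock was already obtained with clk_get():

#include <linux/clk.h>

static int example_power_up(struct clk *clk)
{
	/* clk_prepare() (may sleep) followed by clk_enable() */
	return clk_prepare_enable(clk);
}

static void example_power_down(struct clk *clk)
{
	/* clk_disable() first, then clk_unprepare() */
	clk_disable_unprepare(clk);
}

Because the prepare step may sleep, clk_prepare_enable() must be called from process context, which is why it belongs in activate/probe paths rather than in interrupt handlers.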
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 637bde8aca28..ac175406e582 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -272,7 +272,7 @@ struct mx2_camera_dev {
272 struct device *dev; 272 struct device *dev;
273 struct soc_camera_host soc_host; 273 struct soc_camera_host soc_host;
274 struct soc_camera_device *icd; 274 struct soc_camera_device *icd;
275 struct clk *clk_csi, *clk_emma; 275 struct clk *clk_csi, *clk_emma_ahb, *clk_emma_ipg;
276 276
277 unsigned int irq_csi, irq_emma; 277 unsigned int irq_csi, irq_emma;
278 void __iomem *base_csi, *base_emma; 278 void __iomem *base_csi, *base_emma;
@@ -407,7 +407,7 @@ static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev)
407{ 407{
408 unsigned long flags; 408 unsigned long flags;
409 409
410 clk_disable(pcdev->clk_csi); 410 clk_disable_unprepare(pcdev->clk_csi);
411 writel(0, pcdev->base_csi + CSICR1); 411 writel(0, pcdev->base_csi + CSICR1);
412 if (cpu_is_mx27()) { 412 if (cpu_is_mx27()) {
413 writel(0, pcdev->base_emma + PRP_CNTL); 413 writel(0, pcdev->base_emma + PRP_CNTL);
@@ -435,7 +435,7 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)
435 if (pcdev->icd) 435 if (pcdev->icd)
436 return -EBUSY; 436 return -EBUSY;
437 437
438 ret = clk_enable(pcdev->clk_csi); 438 ret = clk_prepare_enable(pcdev->clk_csi);
439 if (ret < 0) 439 if (ret < 0)
440 return ret; 440 return ret;
441 441
@@ -1633,23 +1633,34 @@ static int __devinit mx27_camera_emma_init(struct mx2_camera_dev *pcdev)
1633 goto exit_iounmap; 1633 goto exit_iounmap;
1634 } 1634 }
1635 1635
1636 pcdev->clk_emma = clk_get(NULL, "emma"); 1636 pcdev->clk_emma_ipg = clk_get(pcdev->dev, "emma-ipg");
1637 if (IS_ERR(pcdev->clk_emma)) { 1637 if (IS_ERR(pcdev->clk_emma_ipg)) {
1638 err = PTR_ERR(pcdev->clk_emma); 1638 err = PTR_ERR(pcdev->clk_emma_ipg);
1639 goto exit_free_irq; 1639 goto exit_free_irq;
1640 } 1640 }
1641 1641
1642 clk_enable(pcdev->clk_emma); 1642 clk_prepare_enable(pcdev->clk_emma_ipg);
1643
1644 pcdev->clk_emma_ahb = clk_get(pcdev->dev, "emma-ahb");
1645 if (IS_ERR(pcdev->clk_emma_ahb)) {
1646 err = PTR_ERR(pcdev->clk_emma_ahb);
1647 goto exit_clk_emma_ipg_put;
1648 }
1649
1650 clk_prepare_enable(pcdev->clk_emma_ahb);
1643 1651
1644 err = mx27_camera_emma_prp_reset(pcdev); 1652 err = mx27_camera_emma_prp_reset(pcdev);
1645 if (err) 1653 if (err)
1646 goto exit_clk_emma_put; 1654 goto exit_clk_emma_ahb_put;
1647 1655
1648 return err; 1656 return err;
1649 1657
1650exit_clk_emma_put: 1658exit_clk_emma_ahb_put:
1651 clk_disable(pcdev->clk_emma); 1659 clk_disable_unprepare(pcdev->clk_emma_ahb);
1652 clk_put(pcdev->clk_emma); 1660 clk_put(pcdev->clk_emma_ahb);
1661exit_clk_emma_ipg_put:
1662 clk_disable_unprepare(pcdev->clk_emma_ipg);
1663 clk_put(pcdev->clk_emma_ipg);
1653exit_free_irq: 1664exit_free_irq:
1654 free_irq(pcdev->irq_emma, pcdev); 1665 free_irq(pcdev->irq_emma, pcdev);
1655exit_iounmap: 1666exit_iounmap:
@@ -1685,7 +1696,7 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)
1685 goto exit; 1696 goto exit;
1686 } 1697 }
1687 1698
1688 pcdev->clk_csi = clk_get(&pdev->dev, NULL); 1699 pcdev->clk_csi = clk_get(&pdev->dev, "ahb");
1689 if (IS_ERR(pcdev->clk_csi)) { 1700 if (IS_ERR(pcdev->clk_csi)) {
1690 dev_err(&pdev->dev, "Could not get csi clock\n"); 1701 dev_err(&pdev->dev, "Could not get csi clock\n");
1691 err = PTR_ERR(pcdev->clk_csi); 1702 err = PTR_ERR(pcdev->clk_csi);
@@ -1785,8 +1796,10 @@ exit_free_emma:
1785eallocctx: 1796eallocctx:
1786 if (cpu_is_mx27()) { 1797 if (cpu_is_mx27()) {
1787 free_irq(pcdev->irq_emma, pcdev); 1798 free_irq(pcdev->irq_emma, pcdev);
1788 clk_disable(pcdev->clk_emma); 1799 clk_disable_unprepare(pcdev->clk_emma_ipg);
1789 clk_put(pcdev->clk_emma); 1800 clk_put(pcdev->clk_emma_ipg);
1801 clk_disable_unprepare(pcdev->clk_emma_ahb);
1802 clk_put(pcdev->clk_emma_ahb);
1790 iounmap(pcdev->base_emma); 1803 iounmap(pcdev->base_emma);
1791 release_mem_region(pcdev->res_emma->start, resource_size(pcdev->res_emma)); 1804 release_mem_region(pcdev->res_emma->start, resource_size(pcdev->res_emma));
1792 } 1805 }
@@ -1825,8 +1838,10 @@ static int __devexit mx2_camera_remove(struct platform_device *pdev)
1825 iounmap(pcdev->base_csi); 1838 iounmap(pcdev->base_csi);
1826 1839
1827 if (cpu_is_mx27()) { 1840 if (cpu_is_mx27()) {
1828 clk_disable(pcdev->clk_emma); 1841 clk_disable_unprepare(pcdev->clk_emma_ipg);
1829 clk_put(pcdev->clk_emma); 1842 clk_put(pcdev->clk_emma_ipg);
1843 clk_disable_unprepare(pcdev->clk_emma_ahb);
1844 clk_put(pcdev->clk_emma_ahb);
1830 iounmap(pcdev->base_emma); 1845 iounmap(pcdev->base_emma);
1831 res = pcdev->res_emma; 1846 res = pcdev->res_emma;
1832 release_mem_region(res->start, resource_size(res)); 1847 release_mem_region(res->start, resource_size(res));
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index f13643d31353..af2297dd49c8 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -61,15 +61,9 @@
61 61
62#define MAX_VIDEO_MEM 16 62#define MAX_VIDEO_MEM 16
63 63
64enum csi_buffer_state {
65 CSI_BUF_NEEDS_INIT,
66 CSI_BUF_PREPARED,
67};
68
69struct mx3_camera_buffer { 64struct mx3_camera_buffer {
70 /* common v4l buffer stuff -- must be first */ 65 /* common v4l buffer stuff -- must be first */
71 struct vb2_buffer vb; 66 struct vb2_buffer vb;
72 enum csi_buffer_state state;
73 struct list_head queue; 67 struct list_head queue;
74 68
75 /* One descriptot per scatterlist (per frame) */ 69 /* One descriptot per scatterlist (per frame) */
@@ -285,7 +279,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
285 goto error; 279 goto error;
286 } 280 }
287 281
288 if (buf->state == CSI_BUF_NEEDS_INIT) { 282 if (!buf->txd) {
289 sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0); 283 sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0);
290 sg_dma_len(sg) = new_size; 284 sg_dma_len(sg) = new_size;
291 285
@@ -298,7 +292,6 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
298 txd->callback_param = txd; 292 txd->callback_param = txd;
299 txd->callback = mx3_cam_dma_done; 293 txd->callback = mx3_cam_dma_done;
300 294
301 buf->state = CSI_BUF_PREPARED;
302 buf->txd = txd; 295 buf->txd = txd;
303 } else { 296 } else {
304 txd = buf->txd; 297 txd = buf->txd;
@@ -385,7 +378,6 @@ static void mx3_videobuf_release(struct vb2_buffer *vb)
385 378
386 /* Doesn't hurt also if the list is empty */ 379 /* Doesn't hurt also if the list is empty */
387 list_del_init(&buf->queue); 380 list_del_init(&buf->queue);
388 buf->state = CSI_BUF_NEEDS_INIT;
389 381
390 if (txd) { 382 if (txd) {
391 buf->txd = NULL; 383 buf->txd = NULL;
@@ -405,13 +397,13 @@ static int mx3_videobuf_init(struct vb2_buffer *vb)
405 struct mx3_camera_dev *mx3_cam = ici->priv; 397 struct mx3_camera_dev *mx3_cam = ici->priv;
406 struct mx3_camera_buffer *buf = to_mx3_vb(vb); 398 struct mx3_camera_buffer *buf = to_mx3_vb(vb);
407 399
408 /* This is for locking debugging only */ 400 if (!buf->txd) {
409 INIT_LIST_HEAD(&buf->queue); 401 /* This is for locking debugging only */
410 sg_init_table(&buf->sg, 1); 402 INIT_LIST_HEAD(&buf->queue);
403 sg_init_table(&buf->sg, 1);
411 404
412 buf->state = CSI_BUF_NEEDS_INIT; 405 mx3_cam->buf_total += vb2_plane_size(vb, 0);
413 406 }
414 mx3_cam->buf_total += vb2_plane_size(vb, 0);
415 407
416 return 0; 408 return 0;
417} 409}
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index b03ffecb7438..1bde255e45df 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -171,7 +171,8 @@ static int soc_camera_try_fmt(struct soc_camera_device *icd,
171 dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n", 171 dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
172 pixfmtstr(pix->pixelformat), pix->width, pix->height); 172 pixfmtstr(pix->pixelformat), pix->width, pix->height);
173 173
174 if (!(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) { 174 if (pix->pixelformat != V4L2_PIX_FMT_JPEG &&
175 !(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) {
175 pix->bytesperline = 0; 176 pix->bytesperline = 0;
176 pix->sizeimage = 0; 177 pix->sizeimage = 0;
177 } 178 }
diff --git a/drivers/media/video/soc_mediabus.c b/drivers/media/video/soc_mediabus.c
index 89dce097a827..a397812635d6 100644
--- a/drivers/media/video/soc_mediabus.c
+++ b/drivers/media/video/soc_mediabus.c
@@ -378,6 +378,9 @@ EXPORT_SYMBOL(soc_mbus_samples_per_pixel);
378 378
379s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf) 379s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
380{ 380{
381 if (mf->fourcc == V4L2_PIX_FMT_JPEG)
382 return 0;
383
381 if (mf->layout != SOC_MBUS_LAYOUT_PACKED) 384 if (mf->layout != SOC_MBUS_LAYOUT_PACKED)
382 return width * mf->bits_per_sample / 8; 385 return width * mf->bits_per_sample / 8;
383 386
@@ -400,6 +403,9 @@ EXPORT_SYMBOL(soc_mbus_bytes_per_line);
400s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf, 403s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
401 u32 bytes_per_line, u32 height) 404 u32 bytes_per_line, u32 height)
402{ 405{
406 if (mf->fourcc == V4L2_PIX_FMT_JPEG)
407 return 0;
408
403 if (mf->layout == SOC_MBUS_LAYOUT_PACKED) 409 if (mf->layout == SOC_MBUS_LAYOUT_PACKED)
404 return bytes_per_line * height; 410 return bytes_per_line * height;
405 411
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 9288fbd5001b..5577381b5bf0 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -338,6 +338,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
338 if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) { 338 if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
339 buf->error = 0; 339 buf->error = 0;
340 buf->state = UVC_BUF_STATE_QUEUED; 340 buf->state = UVC_BUF_STATE_QUEUED;
341 buf->bytesused = 0;
341 vb2_set_plane_payload(&buf->buf, 0, 0); 342 vb2_set_plane_payload(&buf->buf, 0, 0);
342 return buf; 343 return buf;
343 } 344 }
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index c3b7b5f59b32..6bc47fc82fe2 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -402,8 +402,10 @@ static void v4l_print_hw_freq_seek(const void *arg, bool write_only)
402{ 402{
403 const struct v4l2_hw_freq_seek *p = arg; 403 const struct v4l2_hw_freq_seek *p = arg;
404 404
405 pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u\n", 405 pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, "
406 p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing); 406 "rangelow=%u, rangehigh=%u\n",
407 p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing,
408 p->rangelow, p->rangehigh);
407} 409}
408 410
409static void v4l_print_requestbuffers(const void *arg, bool write_only) 411static void v4l_print_requestbuffers(const void *arg, bool write_only)
@@ -1853,6 +1855,8 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
1853 .type = type, 1855 .type = type,
1854 }; 1856 };
1855 1857
1858 if (p->index)
1859 return -EINVAL;
1856 err = ops->vidioc_g_tuner(file, fh, &t); 1860 err = ops->vidioc_g_tuner(file, fh, &t);
1857 if (err) 1861 if (err)
1858 return err; 1862 return err;
@@ -1870,6 +1874,8 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
1870 1874
1871 if (type != V4L2_TUNER_RADIO) 1875 if (type != V4L2_TUNER_RADIO)
1872 return -EINVAL; 1876 return -EINVAL;
1877 if (p->index)
1878 return -EINVAL;
1873 err = ops->vidioc_g_modulator(file, fh, &m); 1879 err = ops->vidioc_g_modulator(file, fh, &m);
1874 if (err) 1880 if (err)
1875 return err; 1881 return err;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index d1facef28a60..b1a146205c08 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -395,7 +395,8 @@ config MFD_TC6387XB
395 395
396config MFD_TC6393XB 396config MFD_TC6393XB
397 bool "Support Toshiba TC6393XB" 397 bool "Support Toshiba TC6393XB"
398 depends on GPIOLIB && ARM && HAVE_CLK 398 depends on ARM && HAVE_CLK
399 select GPIOLIB
399 select MFD_CORE 400 select MFD_CORE
400 select MFD_TMIO 401 select MFD_TMIO
401 help 402 help
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 87b251ab6ec5..b9e2000969f0 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -18,6 +18,8 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/cpu.h>
22#include <linux/module.h>
21#include <linux/err.h> 23#include <linux/err.h>
22#include <linux/slab.h> 24#include <linux/slab.h>
23#include <asm/uv/uv_hub.h> 25#include <asm/uv/uv_hub.h>
@@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
59 XPC_NOTIFY_MSG_SIZE_UV) 61 XPC_NOTIFY_MSG_SIZE_UV)
60#define XPC_NOTIFY_IRQ_NAME "xpc_notify" 62#define XPC_NOTIFY_IRQ_NAME "xpc_notify"
61 63
64static int xpc_mq_node = -1;
65
62static struct xpc_gru_mq_uv *xpc_activate_mq_uv; 66static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
63static struct xpc_gru_mq_uv *xpc_notify_mq_uv; 67static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
64 68
@@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
109#if defined CONFIG_X86_64 113#if defined CONFIG_X86_64
110 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, 114 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
111 UV_AFFINITY_CPU); 115 UV_AFFINITY_CPU);
112 if (mq->irq < 0) { 116 if (mq->irq < 0)
113 dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
114 -mq->irq);
115 return mq->irq; 117 return mq->irq;
116 }
117 118
118 mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); 119 mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
119 120
@@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
238 mq->mmr_blade = uv_cpu_to_blade_id(cpu); 239 mq->mmr_blade = uv_cpu_to_blade_id(cpu);
239 240
240 nid = cpu_to_node(cpu); 241 nid = cpu_to_node(cpu);
241 page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 242 page = alloc_pages_exact_node(nid,
242 pg_order); 243 GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
244 pg_order);
243 if (page == NULL) { 245 if (page == NULL) {
244 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " 246 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
245 "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); 247 "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
@@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
1731 .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv, 1733 .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
1732}; 1734};
1733 1735
1736static int
1737xpc_init_mq_node(int nid)
1738{
1739 int cpu;
1740
1741 get_online_cpus();
1742
1743 for_each_cpu(cpu, cpumask_of_node(nid)) {
1744 xpc_activate_mq_uv =
1745 xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
1746 XPC_ACTIVATE_IRQ_NAME,
1747 xpc_handle_activate_IRQ_uv);
1748 if (!IS_ERR(xpc_activate_mq_uv))
1749 break;
1750 }
1751 if (IS_ERR(xpc_activate_mq_uv)) {
1752 put_online_cpus();
1753 return PTR_ERR(xpc_activate_mq_uv);
1754 }
1755
1756 for_each_cpu(cpu, cpumask_of_node(nid)) {
1757 xpc_notify_mq_uv =
1758 xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
1759 XPC_NOTIFY_IRQ_NAME,
1760 xpc_handle_notify_IRQ_uv);
1761 if (!IS_ERR(xpc_notify_mq_uv))
1762 break;
1763 }
1764 if (IS_ERR(xpc_notify_mq_uv)) {
1765 xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
1766 put_online_cpus();
1767 return PTR_ERR(xpc_notify_mq_uv);
1768 }
1769
1770 put_online_cpus();
1771 return 0;
1772}
1773
1734int 1774int
1735xpc_init_uv(void) 1775xpc_init_uv(void)
1736{ 1776{
1777 int nid;
1778 int ret = 0;
1779
1737 xpc_arch_ops = xpc_arch_ops_uv; 1780 xpc_arch_ops = xpc_arch_ops_uv;
1738 1781
1739 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { 1782 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
@@ -1742,21 +1785,21 @@ xpc_init_uv(void)
1742 return -E2BIG; 1785 return -E2BIG;
1743 } 1786 }
1744 1787
1745 xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 1788 if (xpc_mq_node < 0)
1746 XPC_ACTIVATE_IRQ_NAME, 1789 for_each_online_node(nid) {
1747 xpc_handle_activate_IRQ_uv); 1790 ret = xpc_init_mq_node(nid);
1748 if (IS_ERR(xpc_activate_mq_uv))
1749 return PTR_ERR(xpc_activate_mq_uv);
1750 1791
1751 xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 1792 if (!ret)
1752 XPC_NOTIFY_IRQ_NAME, 1793 break;
1753 xpc_handle_notify_IRQ_uv); 1794 }
1754 if (IS_ERR(xpc_notify_mq_uv)) { 1795 else
1755 xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); 1796 ret = xpc_init_mq_node(xpc_mq_node);
1756 return PTR_ERR(xpc_notify_mq_uv);
1757 }
1758 1797
1759 return 0; 1798 if (ret < 0)
1799 dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
1800 -ret);
1801
1802 return ret;
1760} 1803}
1761 1804
1762void 1805void
@@ -1765,3 +1808,6 @@ xpc_exit_uv(void)
1765 xpc_destroy_gru_mq_uv(xpc_notify_mq_uv); 1808 xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
1766 xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); 1809 xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
1767} 1810}
1811
1812module_param(xpc_mq_node, int, 0);
1813MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6fae5f3ec7f6..d688a8af432c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -398,7 +398,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
398 sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); 398 sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
399 skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; 399 skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
400 400
401 if (unlikely(netpoll_tx_running(slave_dev))) 401 if (unlikely(netpoll_tx_running(bond->dev)))
402 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); 402 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
403 else 403 else
404 dev_queue_xmit(skb); 404 dev_queue_xmit(skb);
@@ -1235,12 +1235,12 @@ static inline int slave_enable_netpoll(struct slave *slave)
1235 struct netpoll *np; 1235 struct netpoll *np;
1236 int err = 0; 1236 int err = 0;
1237 1237
1238 np = kzalloc(sizeof(*np), GFP_KERNEL); 1238 np = kzalloc(sizeof(*np), GFP_ATOMIC);
1239 err = -ENOMEM; 1239 err = -ENOMEM;
1240 if (!np) 1240 if (!np)
1241 goto out; 1241 goto out;
1242 1242
1243 err = __netpoll_setup(np, slave->dev); 1243 err = __netpoll_setup(np, slave->dev, GFP_ATOMIC);
1244 if (err) { 1244 if (err) {
1245 kfree(np); 1245 kfree(np);
1246 goto out; 1246 goto out;
@@ -1257,9 +1257,7 @@ static inline void slave_disable_netpoll(struct slave *slave)
1257 return; 1257 return;
1258 1258
1259 slave->np = NULL; 1259 slave->np = NULL;
1260 synchronize_rcu_bh(); 1260 __netpoll_free_rcu(np);
1261 __netpoll_cleanup(np);
1262 kfree(np);
1263} 1261}
1264static inline bool slave_dev_support_netpoll(struct net_device *slave_dev) 1262static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
1265{ 1263{
@@ -1292,7 +1290,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
1292 read_unlock(&bond->lock); 1290 read_unlock(&bond->lock);
1293} 1291}
1294 1292
1295static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) 1293static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
1296{ 1294{
1297 struct bonding *bond = netdev_priv(dev); 1295 struct bonding *bond = netdev_priv(dev);
1298 struct slave *slave; 1296 struct slave *slave;
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 4f50145f6483..662c5f7eb0c5 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
109 priv = netdev_priv(dev); 109 priv = netdev_priv(dev);
110 110
111 dev->irq = res_irq->start; 111 dev->irq = res_irq->start;
112 priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED); 112 priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
113 if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
114 priv->irq_flags |= IRQF_SHARED;
113 priv->reg_base = addr; 115 priv->reg_base = addr;
114 /* The CAN clock frequency is half the oscillator clock frequency */ 116 /* The CAN clock frequency is half the oscillator clock frequency */
115 priv->can.clock.freq = pdata->osc_freq / 2; 117 priv->can.clock.freq = pdata->osc_freq / 2;
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 310596175676..b595d3422b9f 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
150 const uint8_t *mem, *end, *dat; 150 const uint8_t *mem, *end, *dat;
151 uint16_t type, len; 151 uint16_t type, len;
152 uint32_t addr; 152 uint32_t addr;
153 uint8_t *buf = NULL; 153 uint8_t *buf = NULL, *new_buf;
154 int buflen = 0; 154 int buflen = 0;
155 int8_t type_end = 0; 155 int8_t type_end = 0;
156 156
@@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
199 if (len > buflen) { 199 if (len > buflen) {
200 /* align buflen */ 200 /* align buflen */
201 buflen = (len + (1024-1)) & ~(1024-1); 201 buflen = (len + (1024-1)) & ~(1024-1);
202 buf = krealloc(buf, buflen, GFP_KERNEL); 202 new_buf = krealloc(buf, buflen, GFP_KERNEL);
203 if (!buf) { 203 if (!new_buf) {
204 ret = -ENOMEM; 204 ret = -ENOMEM;
205 goto failed; 205 goto failed;
206 } 206 }
207 buf = new_buf;
207 } 208 }
208 /* verify record data */ 209 /* verify record data */
209 memcpy_fromio(buf, &dpram[addr + offset], len); 210 memcpy_fromio(buf, &dpram[addr + offset], len);
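The softing_fw.c hunk above fixes a classic krealloc() leak: when krealloc() fails it returns NULL and leaves the old allocation alive, so writing the return value straight over the only pointer to that allocation loses it. A small sketch of the safe pattern, with hypothetical names:

#include <linux/kernel.h>
#include <linux/slab.h>

/* Grow *bufp to at least 'needed' bytes, rounded up to 1 KiB. On failure
 * the existing buffer is left untouched so the caller can still free it
 * on its normal error path. */
static int grow_buffer(u8 **bufp, int *buflenp, int needed)
{
	int newlen = ALIGN(needed, 1024);
	u8 *tmp = krealloc(*bufp, newlen, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;

	*bufp = tmp;
	*buflenp = newlen;
	return 0;
}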
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 463b9ec57d80..6d1a24acb77e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
1708 continue; \ 1708 continue; \
1709 else 1709 else
1710 1710
1711#define for_each_napi_rx_queue(bp, var) \
1712 for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
1713
1714/* Skip OOO FP */ 1711/* Skip OOO FP */
1715#define for_each_tx_queue(bp, var) \ 1712#define for_each_tx_queue(bp, var) \
1716 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ 1713 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e879e19eb0d6..af20c6ee2cd9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2046 */ 2046 */
2047 bnx2x_setup_tc(bp->dev, bp->max_cos); 2047 bnx2x_setup_tc(bp->dev, bp->max_cos);
2048 2048
2049 /* Add all NAPI objects */
2050 bnx2x_add_all_napi(bp);
2049 bnx2x_napi_enable(bp); 2051 bnx2x_napi_enable(bp);
2050 2052
2051 /* set pf load just before approaching the MCP */ 2053 /* set pf load just before approaching the MCP */
@@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2408 2410
2409 /* Disable HW interrupts, NAPI */ 2411 /* Disable HW interrupts, NAPI */
2410 bnx2x_netif_stop(bp, 1); 2412 bnx2x_netif_stop(bp, 1);
2413 /* Delete all NAPI objects */
2414 bnx2x_del_all_napi(bp);
2411 2415
2412 /* Release IRQs */ 2416 /* Release IRQs */
2413 bnx2x_free_irq(bp); 2417 bnx2x_free_irq(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index dfa757e74296..21b553229ea4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
792 bp->num_napi_queues = bp->num_queues; 792 bp->num_napi_queues = bp->num_queues;
793 793
794 /* Add NAPI objects */ 794 /* Add NAPI objects */
795 for_each_napi_rx_queue(bp, i) 795 for_each_rx_queue(bp, i)
796 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 796 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
797 bnx2x_poll, BNX2X_NAPI_WEIGHT); 797 bnx2x_poll, BNX2X_NAPI_WEIGHT);
798} 798}
@@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
801{ 801{
802 int i; 802 int i;
803 803
804 for_each_napi_rx_queue(bp, i) 804 for_each_rx_queue(bp, i)
805 netif_napi_del(&bnx2x_fp(bp, i, napi)); 805 netif_napi_del(&bnx2x_fp(bp, i, napi));
806} 806}
807 807
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index fc4e0e3885b0..c37a68d68090 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev,
2888 */ 2888 */
2889static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) 2889static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
2890{ 2890{
2891 bnx2x_del_all_napi(bp);
2892 bnx2x_disable_msi(bp); 2891 bnx2x_disable_msi(bp);
2893 BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; 2892 BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
2894 bnx2x_set_int_mode(bp); 2893 bnx2x_set_int_mode(bp);
2895 bnx2x_add_all_napi(bp);
2896} 2894}
2897 2895
2898/** 2896/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 02b5a343b195..21054987257a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8427,6 +8427,8 @@ unload_error:
8427 8427
8428 /* Disable HW interrupts, NAPI */ 8428 /* Disable HW interrupts, NAPI */
8429 bnx2x_netif_stop(bp, 1); 8429 bnx2x_netif_stop(bp, 1);
8430 /* Delete all NAPI objects */
8431 bnx2x_del_all_napi(bp);
8430 8432
8431 /* Release IRQs */ 8433 /* Release IRQs */
8432 bnx2x_free_irq(bp); 8434 bnx2x_free_irq(bp);
@@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11229static void poll_bnx2x(struct net_device *dev) 11231static void poll_bnx2x(struct net_device *dev)
11230{ 11232{
11231 struct bnx2x *bp = netdev_priv(dev); 11233 struct bnx2x *bp = netdev_priv(dev);
11234 int i;
11232 11235
11233 disable_irq(bp->pdev->irq); 11236 for_each_eth_queue(bp, i) {
11234 bnx2x_interrupt(bp->pdev->irq, dev); 11237 struct bnx2x_fastpath *fp = &bp->fp[i];
11235 enable_irq(bp->pdev->irq); 11238 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
11239 }
11236} 11240}
11237#endif 11241#endif
11238 11242
@@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11899 */ 11903 */
11900 bnx2x_set_int_mode(bp); 11904 bnx2x_set_int_mode(bp);
11901 11905
11902 /* Add all NAPI objects */
11903 bnx2x_add_all_napi(bp);
11904
11905 rc = register_netdev(dev); 11906 rc = register_netdev(dev);
11906 if (rc) { 11907 if (rc) {
11907 dev_err(&pdev->dev, "Cannot register net device\n"); 11908 dev_err(&pdev->dev, "Cannot register net device\n");
@@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11976 11977
11977 unregister_netdev(dev); 11978 unregister_netdev(dev);
11978 11979
11979 /* Delete all NAPI objects */
11980 bnx2x_del_all_napi(bp);
11981
11982 /* Power on: we can't let PCI layer write to us while we are in D3 */ 11980 /* Power on: we can't let PCI layer write to us while we are in D3 */
11983 bnx2x_set_power_state(bp, PCI_D0); 11981 bnx2x_set_power_state(bp, PCI_D0);
11984 11982
@@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12025 bnx2x_tx_disable(bp); 12023 bnx2x_tx_disable(bp);
12026 12024
12027 bnx2x_netif_stop(bp, 0); 12025 bnx2x_netif_stop(bp, 0);
12026 /* Delete all NAPI objects */
12027 bnx2x_del_all_napi(bp);
12028 12028
12029 del_timer_sync(&bp->timer); 12029 del_timer_sync(&bp->timer);
12030 12030
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 845b2020f291..138446957786 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev)
1243{ 1243{
1244 struct net_local *lp = netdev_priv(dev); 1244 struct net_local *lp = netdev_priv(dev);
1245 unsigned long flags; 1245 unsigned long flags;
1246 u16 cfg;
1246 1247
1247 spin_lock_irqsave(&lp->lock, flags); 1248 spin_lock_irqsave(&lp->lock, flags);
1248 if (dev->flags & IFF_PROMISC) 1249 if (dev->flags & IFF_PROMISC)
@@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev)
1260 /* in promiscuous mode, we accept errored packets, 1261 /* in promiscuous mode, we accept errored packets,
1261 * so we have to enable interrupts on them also 1262 * so we have to enable interrupts on them also
1262 */ 1263 */
1263 writereg(dev, PP_RxCFG, 1264 cfg = lp->curr_rx_cfg;
1264 (lp->curr_rx_cfg | 1265 if (lp->rx_mode == RX_ALL_ACCEPT)
1265 (lp->rx_mode == RX_ALL_ACCEPT) 1266 cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL;
1266 ? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL) 1267 writereg(dev, PP_RxCFG, cfg);
1267 : 0));
1268 spin_unlock_irqrestore(&lp->lock, flags); 1268 spin_unlock_irqrestore(&lp->lock, flags);
1269} 1269}
1270 1270
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 7fac97b4bb59..8c63d06ab12b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
259 int num = 0, status = 0; 259 int num = 0, status = 0;
260 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 260 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
261 261
262 spin_lock_bh(&adapter->mcc_cq_lock); 262 spin_lock(&adapter->mcc_cq_lock);
263 while ((compl = be_mcc_compl_get(adapter))) { 263 while ((compl = be_mcc_compl_get(adapter))) {
264 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 264 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
265 /* Interpret flags as an async trailer */ 265 /* Interpret flags as an async trailer */
@@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
280 if (num) 280 if (num)
281 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); 281 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
282 282
283 spin_unlock_bh(&adapter->mcc_cq_lock); 283 spin_unlock(&adapter->mcc_cq_lock);
284 return status; 284 return status;
285} 285}
286 286
@@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
295 if (be_error(adapter)) 295 if (be_error(adapter))
296 return -EIO; 296 return -EIO;
297 297
298 local_bh_disable();
298 status = be_process_mcc(adapter); 299 status = be_process_mcc(adapter);
300 local_bh_enable();
299 301
300 if (atomic_read(&mcc_obj->q.used) == 0) 302 if (atomic_read(&mcc_obj->q.used) == 0)
301 break; 303 break;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 90a903d83d87..78b8aa8069f0 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3763,7 +3763,9 @@ static void be_worker(struct work_struct *work)
3763 /* when interrupts are not yet enabled, just reap any pending 3763 /* when interrupts are not yet enabled, just reap any pending
3764 * mcc completions */ 3764 * mcc completions */
3765 if (!netif_running(adapter->netdev)) { 3765 if (!netif_running(adapter->netdev)) {
3766 local_bh_disable();
3766 be_process_mcc(adapter); 3767 be_process_mcc(adapter);
3768 local_bh_enable();
3767 goto reschedule; 3769 goto reschedule;
3768 } 3770 }
3769 3771
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 0f2d1a710909..151453309401 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -174,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
174 174
175 new_bus->phy_mask = ~0; 175 new_bus->phy_mask = ~0;
176 new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); 176 new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
177 if (!new_bus->irq) 177 if (!new_bus->irq) {
178 ret = -ENOMEM;
178 goto out_unmap_regs; 179 goto out_unmap_regs;
180 }
179 181
180 new_bus->parent = &ofdev->dev; 182 new_bus->parent = &ofdev->dev;
181 dev_set_drvdata(&ofdev->dev, new_bus); 183 dev_set_drvdata(&ofdev->dev, new_bus);
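
This hunk, together with the mii-fec, mlx4 mcg, ks959/ksdazzle, i2400m and dscc4 hunks later in the series, closes the same hole: an error path jumped to a shared exit label without first setting the return code, so a failed allocation was reported as success. A minimal, self-contained sketch of the idiom in plain userspace C (function and label names are invented for illustration, not taken from the drivers):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int example_probe(void)
{
	int ret;
	char *regs;
	int *irq_table;

	regs = malloc(64);			/* stands in for ioremap() */
	if (!regs) {
		ret = -ENOMEM;			/* set the code *before* jumping */
		goto out;
	}

	irq_table = malloc(32 * sizeof(*irq_table));
	if (!irq_table) {
		ret = -ENOMEM;			/* the line the fixes above add */
		goto out_free_regs;
	}

	/* ... bus registration would happen here ... */
	free(irq_table);
	free(regs);
	return 0;

out_free_regs:
	free(regs);
out:
	return ret;
}

int main(void)
{
	printf("example_probe() returned %d\n", example_probe());
	return 0;
}

The substance of each fix is only that every goto to the exit label is now preceded by an assignment of the value that will be returned there.
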
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 55bb867258e6..cdf702a59485 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -137,8 +137,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
137 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); 137 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
138 138
139 fec->fecp = ioremap(res.start, resource_size(&res)); 139 fec->fecp = ioremap(res.start, resource_size(&res));
140 if (!fec->fecp) 140 if (!fec->fecp) {
141 ret = -ENOMEM;
141 goto out_fec; 142 goto out_fec;
143 }
142 144
143 if (get_bus_freq) { 145 if (get_bus_freq) {
144 clock = get_bus_freq(ofdev->dev.of_node); 146 clock = get_bus_freq(ofdev->dev.of_node);
@@ -172,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
172 174
173 new_bus->phy_mask = ~0; 175 new_bus->phy_mask = ~0;
174 new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); 176 new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
175 if (!new_bus->irq) 177 if (!new_bus->irq) {
178 ret = -ENOMEM;
176 goto out_unmap_regs; 179 goto out_unmap_regs;
180 }
177 181
178 new_bus->parent = &ofdev->dev; 182 new_bus->parent = &ofdev->dev;
179 dev_set_drvdata(&ofdev->dev, new_bus); 183 dev_set_drvdata(&ofdev->dev, new_bus);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4605f7246687..d3233f59a82e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev)
1041 1041
1042 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1042 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1043 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1043 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1044 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1044 dev->features |= NETIF_F_HW_VLAN_RX;
1045 } 1045 }
1046 1046
1047 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 1047 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index cd153326c3cf..cb3356c9af80 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -310,6 +310,7 @@ struct e1000_adapter {
310 */ 310 */
311 struct e1000_ring *tx_ring /* One per active queue */ 311 struct e1000_ring *tx_ring /* One per active queue */
312 ____cacheline_aligned_in_smp; 312 ____cacheline_aligned_in_smp;
313 u32 tx_fifo_limit;
313 314
314 struct napi_struct napi; 315 struct napi_struct napi;
315 316
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 46c3b1f9ff89..d01a099475a1 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3517,6 +3517,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
3517 } 3517 }
3518 3518
3519 /* 3519 /*
3520 * Alignment of Tx data is on an arbitrary byte boundary with the
3521 * maximum size per Tx descriptor limited only to the transmit
3522 * allocation of the packet buffer minus 96 bytes with an upper
3523 * limit of 24KB due to receive synchronization limitations.
3524 */
3525 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3526 24 << 10);
3527
3528 /*
3520 * Disable Adaptive Interrupt Moderation if 2 full packets cannot 3529 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
3521 * fit in receive buffer. 3530 * fit in receive buffer.
3522 */ 3531 */
@@ -4785,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
4785 return 1; 4794 return 1;
4786} 4795}
4787 4796
4788#define E1000_MAX_PER_TXD 8192
4789#define E1000_MAX_TXD_PWR 12
4790
4791static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, 4797static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
4792 unsigned int first, unsigned int max_per_txd, 4798 unsigned int first, unsigned int max_per_txd,
4793 unsigned int nr_frags, unsigned int mss) 4799 unsigned int nr_frags)
4794{ 4800{
4795 struct e1000_adapter *adapter = tx_ring->adapter; 4801 struct e1000_adapter *adapter = tx_ring->adapter;
4796 struct pci_dev *pdev = adapter->pdev; 4802 struct pci_dev *pdev = adapter->pdev;
@@ -5023,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5023 5029
5024static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) 5030static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5025{ 5031{
5032 BUG_ON(size > tx_ring->count);
5033
5026 if (e1000_desc_unused(tx_ring) >= size) 5034 if (e1000_desc_unused(tx_ring) >= size)
5027 return 0; 5035 return 0;
5028 return __e1000_maybe_stop_tx(tx_ring, size); 5036 return __e1000_maybe_stop_tx(tx_ring, size);
5029} 5037}
5030 5038
5031#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
5032static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 5039static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5033 struct net_device *netdev) 5040 struct net_device *netdev)
5034{ 5041{
5035 struct e1000_adapter *adapter = netdev_priv(netdev); 5042 struct e1000_adapter *adapter = netdev_priv(netdev);
5036 struct e1000_ring *tx_ring = adapter->tx_ring; 5043 struct e1000_ring *tx_ring = adapter->tx_ring;
5037 unsigned int first; 5044 unsigned int first;
5038 unsigned int max_per_txd = E1000_MAX_PER_TXD;
5039 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
5040 unsigned int tx_flags = 0; 5045 unsigned int tx_flags = 0;
5041 unsigned int len = skb_headlen(skb); 5046 unsigned int len = skb_headlen(skb);
5042 unsigned int nr_frags; 5047 unsigned int nr_frags;
@@ -5056,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5056 } 5061 }
5057 5062
5058 mss = skb_shinfo(skb)->gso_size; 5063 mss = skb_shinfo(skb)->gso_size;
5059 /*
5060 * The controller does a simple calculation to
5061 * make sure there is enough room in the FIFO before
5062 * initiating the DMA for each buffer. The calc is:
5063 * 4 = ceil(buffer len/mss). To make sure we don't
5064 * overrun the FIFO, adjust the max buffer len if mss
5065 * drops.
5066 */
5067 if (mss) { 5064 if (mss) {
5068 u8 hdr_len; 5065 u8 hdr_len;
5069 max_per_txd = min(mss << 2, max_per_txd);
5070 max_txd_pwr = fls(max_per_txd) - 1;
5071 5066
5072 /* 5067 /*
5073 * TSO Workaround for 82571/2/3 Controllers -- if skb->data 5068 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5097,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5097 count++; 5092 count++;
5098 count++; 5093 count++;
5099 5094
5100 count += TXD_USE_COUNT(len, max_txd_pwr); 5095 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
5101 5096
5102 nr_frags = skb_shinfo(skb)->nr_frags; 5097 nr_frags = skb_shinfo(skb)->nr_frags;
5103 for (f = 0; f < nr_frags; f++) 5098 for (f = 0; f < nr_frags; f++)
5104 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 5099 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5105 max_txd_pwr); 5100 adapter->tx_fifo_limit);
5106 5101
5107 if (adapter->hw.mac.tx_pkt_filtering) 5102 if (adapter->hw.mac.tx_pkt_filtering)
5108 e1000_transfer_dhcp_info(adapter, skb); 5103 e1000_transfer_dhcp_info(adapter, skb);
@@ -5144,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5144 tx_flags |= E1000_TX_FLAGS_NO_FCS; 5139 tx_flags |= E1000_TX_FLAGS_NO_FCS;
5145 5140
5146 /* if count is 0 then mapping error has occurred */ 5141 /* if count is 0 then mapping error has occurred */
5147 count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); 5142 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5143 nr_frags);
5148 if (count) { 5144 if (count) {
5149 skb_tx_timestamp(skb); 5145 skb_tx_timestamp(skb);
5150 5146
5151 netdev_sent_queue(netdev, skb->len); 5147 netdev_sent_queue(netdev, skb->len);
5152 e1000_tx_queue(tx_ring, tx_flags, count); 5148 e1000_tx_queue(tx_ring, tx_flags, count);
5153 /* Make sure there is space in the ring for the next send. */ 5149 /* Make sure there is space in the ring for the next send. */
5154 e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2); 5150 e1000_maybe_stop_tx(tx_ring,
5155 5151 (MAX_SKB_FRAGS *
5152 DIV_ROUND_UP(PAGE_SIZE,
5153 adapter->tx_fifo_limit) + 2));
5156 } else { 5154 } else {
5157 dev_kfree_skb_any(skb); 5155 dev_kfree_skb_any(skb);
5158 tx_ring->buffer_info[first].time_stamp = 0; 5156 tx_ring->buffer_info[first].time_stamp = 0;
@@ -6327,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6327 adapter->hw.phy.autoneg_advertised = 0x2f; 6325 adapter->hw.phy.autoneg_advertised = 0x2f;
6328 6326
6329 /* ring size defaults */ 6327 /* ring size defaults */
6330 adapter->rx_ring->count = 256; 6328 adapter->rx_ring->count = E1000_DEFAULT_RXD;
6331 adapter->tx_ring->count = 256; 6329 adapter->tx_ring->count = E1000_DEFAULT_TXD;
6332 6330
6333 /* 6331 /*
6334 * Initial Wake on LAN setting - If APM wake is enabled in 6332 * Initial Wake on LAN setting - If APM wake is enabled in
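
The e1000e changes above replace the fixed E1000_MAX_PER_TXD / TXD_USE_COUNT power-of-two split with a per-adapter tx_fifo_limit derived from the Packet Buffer Allocation (PBA) register, and count descriptors with DIV_ROUND_UP against that limit. A standalone sketch of the arithmetic follows; the PBA value is a made-up example, not something read from hardware:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical PBA value: upper 16 bits are the Tx allocation in KB. */
	uint32_t pba = 0x0014000a;			/* 20 KB Tx, 10 KB Rx (example only) */
	uint32_t tx_alloc = ((pba >> 16) << 10) - 96;	/* bytes, minus 96 bytes of head room */
	uint32_t limit = tx_alloc < (24 << 10) ? tx_alloc : (24 << 10);
	unsigned int len = 9000;			/* e.g. a jumbo-frame linear buffer */

	printf("tx_fifo_limit = %u bytes\n", (unsigned)limit);
	printf("descriptors for %u bytes = %u\n", len,
	       (unsigned)DIV_ROUND_UP(len, limit));
	return 0;
}

For typical PBA splits the limit is well above a 4 KiB page, so the DIV_ROUND_UP(PAGE_SIZE, tx_fifo_limit) factor in the new stop threshold is usually 1 and the reservation stays close to the old MAX_SKB_FRAGS + 2.
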
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 4ec3835e1bc2..a018ea2a43de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -432,8 +432,10 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
432 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { 432 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
433 /* Entry already exists, add to duplicates */ 433 /* Entry already exists, add to duplicates */
434 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 434 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
435 if (!dqp) 435 if (!dqp) {
436 err = -ENOMEM;
436 goto out_mailbox; 437 goto out_mailbox;
438 }
437 dqp->qpn = qpn; 439 dqp->qpn = qpn;
438 list_add_tail(&dqp->list, &entry->duplicates); 440 list_add_tail(&dqp->list, &entry->duplicates);
439 found = true; 441 found = true;
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 46df3a04030c..24c2305d7948 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -8,7 +8,7 @@ config SH_ETH
8 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ 8 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
9 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ 9 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
10 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \ 10 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
11 CPU_SUBTYPE_SH7757 || ARCH_R8A7740) 11 CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || ARCH_R8A7779)
12 select CRC32 12 select CRC32
13 select NET_CORE 13 select NET_CORE
14 select MII 14 select MII
@@ -18,4 +18,4 @@ config SH_ETH
18 Renesas SuperH Ethernet device driver. 18 Renesas SuperH Ethernet device driver.
19 This driver supporting CPUs are: 19 This driver supporting CPUs are:
20 - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, 20 - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
21 and R8A7740. 21 R8A7740 and R8A7779.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index af0b867a6cf6..bad8f2eec9b4 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -78,7 +78,7 @@ static void sh_eth_select_mii(struct net_device *ndev)
78#endif 78#endif
79 79
80/* There is CPU dependent code */ 80/* There is CPU dependent code */
81#if defined(CONFIG_CPU_SUBTYPE_SH7724) 81#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779)
82#define SH_ETH_RESET_DEFAULT 1 82#define SH_ETH_RESET_DEFAULT 1
83static void sh_eth_set_duplex(struct net_device *ndev) 83static void sh_eth_set_duplex(struct net_device *ndev)
84{ 84{
@@ -93,13 +93,18 @@ static void sh_eth_set_duplex(struct net_device *ndev)
93static void sh_eth_set_rate(struct net_device *ndev) 93static void sh_eth_set_rate(struct net_device *ndev)
94{ 94{
95 struct sh_eth_private *mdp = netdev_priv(ndev); 95 struct sh_eth_private *mdp = netdev_priv(ndev);
96 unsigned int bits = ECMR_RTM;
97
98#if defined(CONFIG_ARCH_R8A7779)
99 bits |= ECMR_ELB;
100#endif
96 101
97 switch (mdp->speed) { 102 switch (mdp->speed) {
98 case 10: /* 10BASE */ 103 case 10: /* 10BASE */
99 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); 104 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR);
100 break; 105 break;
101 case 100:/* 100BASE */ 106 case 100:/* 100BASE */
102 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); 107 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR);
103 break; 108 break;
104 default: 109 default:
105 break; 110 break;
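
The sh_eth change folds the extra R8A7779 ECMR_ELB bit into the same read-modify-write that already toggles ECMR_RTM for 10/100 selection, so both bits are cleared for 10BASE and set for 100BASE in a single register update. A self-contained sketch of that read-modify-write pattern (the bit positions and register value are placeholders, not the real ECMR layout):

#include <stdint.h>
#include <stdio.h>

#define ECMR_RTM	(1u << 2)	/* placeholder bit positions, not the */
#define ECMR_ELB	(1u << 3)	/* actual sh_eth register definitions */

static uint32_t fake_ecmr = 0x40;	/* stands in for the memory-mapped register */

static uint32_t reg_read(void)        { return fake_ecmr; }
static void     reg_write(uint32_t v) { fake_ecmr = v; }

static void set_rate(int speed, int is_r8a7779)
{
	uint32_t bits = ECMR_RTM;

	if (is_r8a7779)
		bits |= ECMR_ELB;		/* the bit the patch adds for this SoC */

	if (speed == 100)
		reg_write(reg_read() | bits);	/* 100BASE: set the speed bits   */
	else if (speed == 10)
		reg_write(reg_read() & ~bits);	/* 10BASE:  clear the speed bits */
}

int main(void)
{
	set_rate(100, 1);
	printf("ECMR after 100BASE on R8A7779: 0x%02x\n", (unsigned)fake_ecmr);
	set_rate(10, 1);
	printf("ECMR after 10BASE  on R8A7779: 0x%02x\n", (unsigned)fake_ecmr);
	return 0;
}
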
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 8cba2df82b18..5faedd855b77 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -863,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
863 &ip_entry->ip4dst, &ip_entry->pdst); 863 &ip_entry->ip4dst, &ip_entry->pdst);
864 if (rc != 0) { 864 if (rc != 0) {
865 rc = efx_filter_get_ipv4_full( 865 rc = efx_filter_get_ipv4_full(
866 &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc, 866 &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
867 &ip_entry->ip4dst, &ip_entry->pdst); 867 &ip_entry->ip4src, &ip_entry->psrc);
868 EFX_WARN_ON_PARANOID(rc); 868 EFX_WARN_ON_PARANOID(rc);
869 ip_mask->ip4src = ~0; 869 ip_mask->ip4src = ~0;
870 ip_mask->psrc = ~0; 870 ip_mask->psrc = ~0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e2d083228f3a..719be3912aa9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#ifndef __COMMON_H__
26#define __COMMON_H__
27
25#include <linux/etherdevice.h> 28#include <linux/etherdevice.h>
26#include <linux/netdevice.h> 29#include <linux/netdevice.h>
27#include <linux/phy.h> 30#include <linux/phy.h>
@@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
366 369
367extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); 370extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
368extern const struct stmmac_ring_mode_ops ring_mode_ops; 371extern const struct stmmac_ring_mode_ops ring_mode_ops;
372
373#endif /* __COMMON_H__ */
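
This and the following stmmac header hunks apply the same mechanical change: each file is wrapped in a conventional include guard so that pulling the header in twice (for instance via common.h being included from several other headers) no longer causes redefinition errors. The pattern, shown with a generic name rather than one of the driver's guards:

#ifndef __EXAMPLE_H__
#define __EXAMPLE_H__

/* declarations live here; a second #include of the file now expands to nothing */
struct example_ops {
	int (*init)(void *priv);
};

#endif /* __EXAMPLE_H__ */
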
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 9820ec842cc0..223adf95fd03 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -20,6 +20,10 @@
20 20
21 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 21 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
22*******************************************************************************/ 22*******************************************************************************/
23
24#ifndef __DESCS_H__
25#define __DESCS_H__
26
23struct dma_desc { 27struct dma_desc {
24 /* Receive descriptor */ 28 /* Receive descriptor */
25 union { 29 union {
@@ -166,3 +170,5 @@ enum tdes_csum_insertion {
166 * is not calculated */ 170 * is not calculated */
167 cic_full = 3, /* IP header and pseudoheader */ 171 cic_full = 3, /* IP header and pseudoheader */
168}; 172};
173
174#endif /* __DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index dd8d6e19dff6..7ee9499a6e38 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -27,6 +27,9 @@
27 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 27 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
28*******************************************************************************/ 28*******************************************************************************/
29 29
30#ifndef __DESC_COM_H__
31#define __DESC_COM_H__
32
30#if defined(CONFIG_STMMAC_RING) 33#if defined(CONFIG_STMMAC_RING)
31static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) 34static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
32{ 35{
@@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
124 p->des01.tx.buffer1_size = len; 127 p->des01.tx.buffer1_size = len;
125} 128}
126#endif 129#endif
130
131#endif /* __DESC_COM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 7c6d857a9cc7..2ec6aeae349e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#ifndef __DWMAC100_H__
26#define __DWMAC100_H__
27
25#include <linux/phy.h> 28#include <linux/phy.h>
26#include "common.h" 29#include "common.h"
27 30
@@ -119,3 +122,5 @@ enum ttc_control {
119#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ 122#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */
120 123
121extern const struct stmmac_dma_ops dwmac100_dma_ops; 124extern const struct stmmac_dma_ops dwmac100_dma_ops;
125
126#endif /* __DWMAC100_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index f90fcb5f9573..0e4cacedc1f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -19,6 +19,8 @@
19 19
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22#ifndef __DWMAC1000_H__
23#define __DWMAC1000_H__
22 24
23#include <linux/phy.h> 25#include <linux/phy.h>
24#include "common.h" 26#include "common.h"
@@ -229,6 +231,7 @@ enum rtc_control {
229#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 231#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
230 232
231/* Synopsys Core versions */ 233/* Synopsys Core versions */
232#define DWMAC_CORE_3_40 34 234#define DWMAC_CORE_3_40 0x34
233 235
234extern const struct stmmac_dma_ops dwmac1000_dma_ops; 236extern const struct stmmac_dma_ops dwmac1000_dma_ops;
237#endif /* __DWMAC1000_H__ */
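
Besides the guard, this hunk corrects DWMAC_CORE_3_40 from decimal 34 to 0x34. The change implies the Synopsys core version is reported as the hex digits of major.minor, so a comparison against the decimal literal could never match a 3.40 core. A tiny standalone check of that distinction (the register value is assumed for illustration, not read from hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed encoding: a 3.40 core reports its version as 0x34. */
	uint8_t synopsys_id = 0x34;

	printf("id == 34   ? %s\n", synopsys_id == 34   ? "yes" : "no");
	printf("id == 0x34 ? %s\n", synopsys_id == 0x34 ? "yes" : "no");
	return 0;
}
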
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index e678ce39d014..e49c9a0fd6ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#ifndef __DWMAC_DMA_H__
26#define __DWMAC_DMA_H__
27
25/* DMA CRS Control and Status Register Mapping */ 28/* DMA CRS Control and Status Register Mapping */
26#define DMA_BUS_MODE 0x00001000 /* Bus Mode */ 29#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
27#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ 30#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
@@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
109extern void dwmac_dma_stop_rx(void __iomem *ioaddr); 112extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
110extern int dwmac_dma_interrupt(void __iomem *ioaddr, 113extern int dwmac_dma_interrupt(void __iomem *ioaddr,
111 struct stmmac_extra_stats *x); 114 struct stmmac_extra_stats *x);
115
116#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index a38352024cb8..67995ef25251 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#ifndef __MMC_H__
26#define __MMC_H__
27
25/* MMC control register */ 28/* MMC control register */
26/* When set, all counter are reset */ 29/* When set, all counter are reset */
27#define MMC_CNTRL_COUNTER_RESET 0x1 30#define MMC_CNTRL_COUNTER_RESET 0x1
@@ -129,3 +132,5 @@ struct stmmac_counters {
129extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); 132extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
130extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); 133extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
131extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); 134extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
135
136#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index c07cfe989f6e..0c74a702d461 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -33,7 +33,7 @@
33#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ 33#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
34#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ 34#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
35#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ 35#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
36#define MMC_DEFAUL_MASK 0xffffffff 36#define MMC_DEFAULT_MASK 0xffffffff
37 37
38/* MMC TX counter registers */ 38/* MMC TX counter registers */
39 39
@@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
147/* To mask all all interrupts.*/ 147/* To mask all all interrupts.*/
148void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) 148void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
149{ 149{
150 writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK); 150 writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
151 writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK); 151 writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
152} 152}
153 153
154/* This reads the MAC core counters (if actaully supported). 154/* This reads the MAC core counters (if actaully supported).
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f2d3665430ad..e872e1da3137 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -20,6 +20,9 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#ifndef __STMMAC_H__
24#define __STMMAC_H__
25
23#define STMMAC_RESOURCE_NAME "stmmaceth" 26#define STMMAC_RESOURCE_NAME "stmmaceth"
24#define DRV_MODULE_VERSION "March_2012" 27#define DRV_MODULE_VERSION "March_2012"
25 28
@@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
166{ 169{
167} 170}
168#endif /* CONFIG_STMMAC_PCI */ 171#endif /* CONFIG_STMMAC_PCI */
172
173#endif /* __STMMAC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fd8882f9602a..c136162e6473 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2077,7 +2077,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2077 goto error_netdev_register; 2077 goto error_netdev_register;
2078 } 2078 }
2079 2079
2080 priv->stmmac_clk = clk_get(priv->device, NULL); 2080 priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
2081 if (IS_ERR(priv->stmmac_clk)) { 2081 if (IS_ERR(priv->stmmac_clk)) {
2082 pr_warning("%s: warning: cannot get CSR clock\n", __func__); 2082 pr_warning("%s: warning: cannot get CSR clock\n", __func__);
2083 goto error_clk_get; 2083 goto error_clk_get;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
index 6863590d184b..aea9b14cdfbe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
@@ -21,6 +21,8 @@
21 21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24#ifndef __STMMAC_TIMER_H__
25#define __STMMAC_TIMER_H__
24 26
25struct stmmac_timer { 27struct stmmac_timer {
26 void (*timer_start) (unsigned int new_freq); 28 void (*timer_start) (unsigned int new_freq);
@@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
40extern int tmu2_register_user(void *fnt, void *data); 42extern int tmu2_register_user(void *fnt, void *data);
41extern void tmu2_unregister_user(void); 43extern void tmu2_unregister_user(void);
42#endif 44#endif
45
46#endif /* __STMMAC_TIMER_H__ */
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 3b5c4571b55e..d15c888e9df8 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -538,11 +538,12 @@ EXPORT_SYMBOL_GPL(cpdma_chan_create);
538 538
539int cpdma_chan_destroy(struct cpdma_chan *chan) 539int cpdma_chan_destroy(struct cpdma_chan *chan)
540{ 540{
541 struct cpdma_ctlr *ctlr = chan->ctlr; 541 struct cpdma_ctlr *ctlr;
542 unsigned long flags; 542 unsigned long flags;
543 543
544 if (!chan) 544 if (!chan)
545 return -EINVAL; 545 return -EINVAL;
546 ctlr = chan->ctlr;
546 547
547 spin_lock_irqsave(&ctlr->lock, flags); 548 spin_lock_irqsave(&ctlr->lock, flags);
548 if (chan->state != CPDMA_STATE_IDLE) 549 if (chan->state != CPDMA_STATE_IDLE)
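
The cpdma fix above is a use-before-check: chan->ctlr was dereferenced in the declaration's initializer before the !chan guard ran, so the guard could never protect against a NULL channel. The corrected ordering, sketched in plain C with invented types:

#include <errno.h>
#include <stdio.h>

struct ctlr { int id; };
struct chan { struct ctlr *ctlr; };

static int chan_destroy(struct chan *chan)
{
	struct ctlr *ctlr;		/* declared, but not yet read from chan */

	if (!chan)
		return -EINVAL;		/* reject NULL before any dereference */
	ctlr = chan->ctlr;		/* only now is it safe to look inside  */

	printf("destroying channel on controller %d\n", ctlr->id);
	return 0;
}

int main(void)
{
	struct ctlr c = { .id = 1 };
	struct chan ch = { .ctlr = &c };

	printf("NULL channel  -> %d\n", chan_destroy(NULL));
	printf("valid channel -> %d\n", chan_destroy(&ch));
	return 0;
}
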
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index cd7ee204e94a..a9ca4a03d31b 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -394,8 +394,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
394 struct device *dev = &pdev->dev; 394 struct device *dev = &pdev->dev;
395 struct davinci_mdio_data *data = dev_get_drvdata(dev); 395 struct davinci_mdio_data *data = dev_get_drvdata(dev);
396 396
397 if (data->bus) 397 if (data->bus) {
398 mdiobus_unregister(data->bus);
398 mdiobus_free(data->bus); 399 mdiobus_free(data->bus);
400 }
399 401
400 if (data->clk) 402 if (data->clk)
401 clk_put(data->clk); 403 clk_put(data->clk);
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 24d8566cfd8b..441b4dc79450 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
673 sm_pm_get_ls(smc,port_to_mib(smc,port))) ; 673 sm_pm_get_ls(smc,port_to_mib(smc,port))) ;
674 break ; 674 break ;
675 case SMT_P_REASON : 675 case SMT_P_REASON :
676 * (u_long *) to = 0 ; 676 *(u32 *)to = 0 ;
677 sp_len = 4 ; 677 sp_len = 4 ;
678 goto sp_done ; 678 goto sp_done ;
679 case SMT_P1033 : /* time stamp */ 679 case SMT_P1033 : /* time stamp */
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 824e2a93fe8a..5f3aeac3f86d 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -542,6 +542,7 @@ static int ks959_net_open(struct net_device *netdev)
542 sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); 542 sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
543 kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); 543 kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
544 if (!kingsun->irlap) { 544 if (!kingsun->irlap) {
545 err = -ENOMEM;
545 dev_err(&kingsun->usbdev->dev, "irlap_open failed\n"); 546 dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
546 goto free_mem; 547 goto free_mem;
547 } 548 }
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 5a278ab83c2f..2d4b6a1ab202 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -436,6 +436,7 @@ static int ksdazzle_net_open(struct net_device *netdev)
436 sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); 436 sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
437 kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); 437 kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
438 if (!kingsun->irlap) { 438 if (!kingsun->irlap) {
439 err = -ENOMEM;
439 dev_err(&kingsun->usbdev->dev, "irlap_open failed\n"); 440 dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
440 goto free_mem; 441 goto free_mem;
441 } 442 }
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index f9347ea3d381..b3321129a83c 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -640,15 +640,9 @@ static int netconsole_netdev_event(struct notifier_block *this,
640 * rtnl_lock already held 640 * rtnl_lock already held
641 */ 641 */
642 if (nt->np.dev) { 642 if (nt->np.dev) {
643 spin_unlock_irqrestore(
644 &target_list_lock,
645 flags);
646 __netpoll_cleanup(&nt->np); 643 __netpoll_cleanup(&nt->np);
647 spin_lock_irqsave(&target_list_lock,
648 flags);
649 dev_put(nt->np.dev); 644 dev_put(nt->np.dev);
650 nt->np.dev = NULL; 645 nt->np.dev = NULL;
651 netconsole_target_put(nt);
652 } 646 }
653 nt->enabled = 0; 647 nt->enabled = 0;
654 stopped = true; 648 stopped = true;
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 5c120189ec86..4d4d25efc1e1 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -132,7 +132,7 @@ int mdio_mux_init(struct device *dev,
132 pb->mii_bus = parent_bus; 132 pb->mii_bus = parent_bus;
133 133
134 ret_val = -ENODEV; 134 ret_val = -ENODEV;
135 for_each_child_of_node(dev->of_node, child_bus_node) { 135 for_each_available_child_of_node(dev->of_node, child_bus_node) {
136 u32 v; 136 u32 v;
137 137
138 r = of_property_read_u32(child_bus_node, "reg", &v); 138 r = of_property_read_u32(child_bus_node, "reg", &v);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 87707ab39430..341b65dbbcd3 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -795,16 +795,17 @@ static void team_port_leave(struct team *team, struct team_port *port)
795} 795}
796 796
797#ifdef CONFIG_NET_POLL_CONTROLLER 797#ifdef CONFIG_NET_POLL_CONTROLLER
798static int team_port_enable_netpoll(struct team *team, struct team_port *port) 798static int team_port_enable_netpoll(struct team *team, struct team_port *port,
799 gfp_t gfp)
799{ 800{
800 struct netpoll *np; 801 struct netpoll *np;
801 int err; 802 int err;
802 803
803 np = kzalloc(sizeof(*np), GFP_KERNEL); 804 np = kzalloc(sizeof(*np), gfp);
804 if (!np) 805 if (!np)
805 return -ENOMEM; 806 return -ENOMEM;
806 807
807 err = __netpoll_setup(np, port->dev); 808 err = __netpoll_setup(np, port->dev, gfp);
808 if (err) { 809 if (err) {
809 kfree(np); 810 kfree(np);
810 return err; 811 return err;
@@ -833,7 +834,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
833} 834}
834 835
835#else 836#else
836static int team_port_enable_netpoll(struct team *team, struct team_port *port) 837static int team_port_enable_netpoll(struct team *team, struct team_port *port,
838 gfp_t gfp)
837{ 839{
838 return 0; 840 return 0;
839} 841}
@@ -913,7 +915,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
913 } 915 }
914 916
915 if (team_netpoll_info(team)) { 917 if (team_netpoll_info(team)) {
916 err = team_port_enable_netpoll(team, port); 918 err = team_port_enable_netpoll(team, port, GFP_KERNEL);
917 if (err) { 919 if (err) {
918 netdev_err(dev, "Failed to enable netpoll on device %s\n", 920 netdev_err(dev, "Failed to enable netpoll on device %s\n",
919 portname); 921 portname);
@@ -1443,7 +1445,7 @@ static void team_netpoll_cleanup(struct net_device *dev)
1443} 1445}
1444 1446
1445static int team_netpoll_setup(struct net_device *dev, 1447static int team_netpoll_setup(struct net_device *dev,
1446 struct netpoll_info *npifo) 1448 struct netpoll_info *npifo, gfp_t gfp)
1447{ 1449{
1448 struct team *team = netdev_priv(dev); 1450 struct team *team = netdev_priv(dev);
1449 struct team_port *port; 1451 struct team_port *port;
@@ -1451,7 +1453,7 @@ static int team_netpoll_setup(struct net_device *dev,
1451 1453
1452 mutex_lock(&team->lock); 1454 mutex_lock(&team->lock);
1453 list_for_each_entry(port, &team->port_list, list) { 1455 list_for_each_entry(port, &team->port_list, list) {
1454 err = team_port_enable_netpoll(team, port); 1456 err = team_port_enable_netpoll(team, port, gfp);
1455 if (err) { 1457 if (err) {
1456 __team_netpoll_cleanup(team); 1458 __team_netpoll_cleanup(team);
1457 break; 1459 break;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 2ea126a16d79..adfab3fc5478 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -247,30 +247,12 @@ err:
247 */ 247 */
248static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf) 248static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
249{ 249{
250 int rv;
251 struct qmi_wwan_state *info = (void *)&dev->data; 250 struct qmi_wwan_state *info = (void *)&dev->data;
252 251
253 /* ZTE makes devices where the interface descriptors and endpoint
254 * configurations of two or more interfaces are identical, even
255 * though the functions are completely different. If set, then
256 * driver_info->data is a bitmap of acceptable interface numbers
257 * allowing us to bind to one such interface without binding to
258 * all of them
259 */
260 if (dev->driver_info->data &&
261 !test_bit(intf->cur_altsetting->desc.bInterfaceNumber, &dev->driver_info->data)) {
262 dev_info(&intf->dev, "not on our whitelist - ignored");
263 rv = -ENODEV;
264 goto err;
265 }
266
267 /* control and data is shared */ 252 /* control and data is shared */
268 info->control = intf; 253 info->control = intf;
269 info->data = intf; 254 info->data = intf;
270 rv = qmi_wwan_register_subdriver(dev); 255 return qmi_wwan_register_subdriver(dev);
271
272err:
273 return rv;
274} 256}
275 257
276static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) 258static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -356,214 +338,64 @@ static const struct driver_info qmi_wwan_shared = {
356 .manage_power = qmi_wwan_manage_power, 338 .manage_power = qmi_wwan_manage_power,
357}; 339};
358 340
359static const struct driver_info qmi_wwan_force_int0 = {
360 .description = "Qualcomm WWAN/QMI device",
361 .flags = FLAG_WWAN,
362 .bind = qmi_wwan_bind_shared,
363 .unbind = qmi_wwan_unbind,
364 .manage_power = qmi_wwan_manage_power,
365 .data = BIT(0), /* interface whitelist bitmap */
366};
367
368static const struct driver_info qmi_wwan_force_int1 = {
369 .description = "Qualcomm WWAN/QMI device",
370 .flags = FLAG_WWAN,
371 .bind = qmi_wwan_bind_shared,
372 .unbind = qmi_wwan_unbind,
373 .manage_power = qmi_wwan_manage_power,
374 .data = BIT(1), /* interface whitelist bitmap */
375};
376
377static const struct driver_info qmi_wwan_force_int2 = {
378 .description = "Qualcomm WWAN/QMI device",
379 .flags = FLAG_WWAN,
380 .bind = qmi_wwan_bind_shared,
381 .unbind = qmi_wwan_unbind,
382 .manage_power = qmi_wwan_manage_power,
383 .data = BIT(2), /* interface whitelist bitmap */
384};
385
386static const struct driver_info qmi_wwan_force_int3 = {
387 .description = "Qualcomm WWAN/QMI device",
388 .flags = FLAG_WWAN,
389 .bind = qmi_wwan_bind_shared,
390 .unbind = qmi_wwan_unbind,
391 .manage_power = qmi_wwan_manage_power,
392 .data = BIT(3), /* interface whitelist bitmap */
393};
394
395static const struct driver_info qmi_wwan_force_int4 = {
396 .description = "Qualcomm WWAN/QMI device",
397 .flags = FLAG_WWAN,
398 .bind = qmi_wwan_bind_shared,
399 .unbind = qmi_wwan_unbind,
400 .manage_power = qmi_wwan_manage_power,
401 .data = BIT(4), /* interface whitelist bitmap */
402};
403
404/* Sierra Wireless provide equally useless interface descriptors
405 * Devices in QMI mode can be switched between two different
406 * configurations:
407 * a) USB interface #8 is QMI/wwan
408 * b) USB interfaces #8, #19 and #20 are QMI/wwan
409 *
410 * Both configurations provide a number of other interfaces (serial++),
411 * some of which have the same endpoint configuration as we expect, so
412 * a whitelist or blacklist is necessary.
413 *
414 * FIXME: The below whitelist should include BIT(20). It does not
415 * because I cannot get it to work...
416 */
417static const struct driver_info qmi_wwan_sierra = {
418 .description = "Sierra Wireless wwan/QMI device",
419 .flags = FLAG_WWAN,
420 .bind = qmi_wwan_bind_shared,
421 .unbind = qmi_wwan_unbind,
422 .manage_power = qmi_wwan_manage_power,
423 .data = BIT(8) | BIT(19), /* interface whitelist bitmap */
424};
425
426#define HUAWEI_VENDOR_ID 0x12D1 341#define HUAWEI_VENDOR_ID 0x12D1
427 342
343/* map QMI/wwan function by a fixed interface number */
344#define QMI_FIXED_INTF(vend, prod, num) \
345 USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
346 .driver_info = (unsigned long)&qmi_wwan_shared
347
428/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */ 348/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
429#define QMI_GOBI1K_DEVICE(vend, prod) \ 349#define QMI_GOBI1K_DEVICE(vend, prod) \
430 USB_DEVICE(vend, prod), \ 350 QMI_FIXED_INTF(vend, prod, 3)
431 .driver_info = (unsigned long)&qmi_wwan_force_int3
432 351
433/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */ 352/* Gobi 2000/3000 QMI/wwan interface number is 0 according to qcserial */
434#define QMI_GOBI_DEVICE(vend, prod) \ 353#define QMI_GOBI_DEVICE(vend, prod) \
435 USB_DEVICE(vend, prod), \ 354 QMI_FIXED_INTF(vend, prod, 0)
436 .driver_info = (unsigned long)&qmi_wwan_force_int0
437 355
438static const struct usb_device_id products[] = { 356static const struct usb_device_id products[] = {
357 /* 1. CDC ECM like devices match on the control interface */
439 { /* Huawei E392, E398 and possibly others sharing both device id and more... */ 358 { /* Huawei E392, E398 and possibly others sharing both device id and more... */
440 .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, 359 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 9),
441 .idVendor = HUAWEI_VENDOR_ID,
442 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
443 .bInterfaceSubClass = 1,
444 .bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */
445 .driver_info = (unsigned long)&qmi_wwan_info, 360 .driver_info = (unsigned long)&qmi_wwan_info,
446 }, 361 },
447 { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ 362 { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
448 .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, 363 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
449 .idVendor = HUAWEI_VENDOR_ID,
450 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
451 .bInterfaceSubClass = 1,
452 .bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */
453 .driver_info = (unsigned long)&qmi_wwan_info, 364 .driver_info = (unsigned long)&qmi_wwan_info,
454 }, 365 },
455 { /* Huawei E392, E398 and possibly others in "Windows mode" 366
456 * using a combined control and data interface without any CDC 367 /* 2. Combined interface devices matching on class+protocol */
457 * functional descriptors 368 { /* Huawei E392, E398 and possibly others in "Windows mode" */
458 */ 369 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
459 .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
460 .idVendor = HUAWEI_VENDOR_ID,
461 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
462 .bInterfaceSubClass = 1,
463 .bInterfaceProtocol = 17,
464 .driver_info = (unsigned long)&qmi_wwan_shared, 370 .driver_info = (unsigned long)&qmi_wwan_shared,
465 }, 371 },
466 { /* Pantech UML290 */ 372 { /* Pantech UML290 */
467 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, 373 USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
468 .idVendor = 0x106c,
469 .idProduct = 0x3718,
470 .bInterfaceClass = 0xff,
471 .bInterfaceSubClass = 0xf0,
472 .bInterfaceProtocol = 0xff,
473 .driver_info = (unsigned long)&qmi_wwan_shared, 374 .driver_info = (unsigned long)&qmi_wwan_shared,
474 }, 375 },
475 { /* ZTE MF820D */ 376 { /* Pantech UML290 - newer firmware */
476 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, 377 USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff),
477 .idVendor = 0x19d2, 378 .driver_info = (unsigned long)&qmi_wwan_shared,
478 .idProduct = 0x0167,
479 .bInterfaceClass = 0xff,
480 .bInterfaceSubClass = 0xff,
481 .bInterfaceProtocol = 0xff,
482 .driver_info = (unsigned long)&qmi_wwan_force_int4,
483 },
484 { /* ZTE MF821D */
485 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
486 .idVendor = 0x19d2,
487 .idProduct = 0x0326,
488 .bInterfaceClass = 0xff,
489 .bInterfaceSubClass = 0xff,
490 .bInterfaceProtocol = 0xff,
491 .driver_info = (unsigned long)&qmi_wwan_force_int4,
492 },
493 { /* ZTE (Vodafone) K3520-Z */
494 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
495 .idVendor = 0x19d2,
496 .idProduct = 0x0055,
497 .bInterfaceClass = 0xff,
498 .bInterfaceSubClass = 0xff,
499 .bInterfaceProtocol = 0xff,
500 .driver_info = (unsigned long)&qmi_wwan_force_int1,
501 },
502 { /* ZTE (Vodafone) K3565-Z */
503 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
504 .idVendor = 0x19d2,
505 .idProduct = 0x0063,
506 .bInterfaceClass = 0xff,
507 .bInterfaceSubClass = 0xff,
508 .bInterfaceProtocol = 0xff,
509 .driver_info = (unsigned long)&qmi_wwan_force_int4,
510 },
511 { /* ZTE (Vodafone) K3570-Z */
512 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
513 .idVendor = 0x19d2,
514 .idProduct = 0x1008,
515 .bInterfaceClass = 0xff,
516 .bInterfaceSubClass = 0xff,
517 .bInterfaceProtocol = 0xff,
518 .driver_info = (unsigned long)&qmi_wwan_force_int4,
519 },
520 { /* ZTE (Vodafone) K3571-Z */
521 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
522 .idVendor = 0x19d2,
523 .idProduct = 0x1010,
524 .bInterfaceClass = 0xff,
525 .bInterfaceSubClass = 0xff,
526 .bInterfaceProtocol = 0xff,
527 .driver_info = (unsigned long)&qmi_wwan_force_int4,
528 },
529 { /* ZTE (Vodafone) K3765-Z */
530 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
531 .idVendor = 0x19d2,
532 .idProduct = 0x2002,
533 .bInterfaceClass = 0xff,
534 .bInterfaceSubClass = 0xff,
535 .bInterfaceProtocol = 0xff,
536 .driver_info = (unsigned long)&qmi_wwan_force_int4,
537 },
538 { /* ZTE (Vodafone) K4505-Z */
539 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
540 .idVendor = 0x19d2,
541 .idProduct = 0x0104,
542 .bInterfaceClass = 0xff,
543 .bInterfaceSubClass = 0xff,
544 .bInterfaceProtocol = 0xff,
545 .driver_info = (unsigned long)&qmi_wwan_force_int4,
546 },
547 { /* ZTE MF60 */
548 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
549 .idVendor = 0x19d2,
550 .idProduct = 0x1402,
551 .bInterfaceClass = 0xff,
552 .bInterfaceSubClass = 0xff,
553 .bInterfaceProtocol = 0xff,
554 .driver_info = (unsigned long)&qmi_wwan_force_int2,
555 },
556 { /* Sierra Wireless MC77xx in QMI mode */
557 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
558 .idVendor = 0x1199,
559 .idProduct = 0x68a2,
560 .bInterfaceClass = 0xff,
561 .bInterfaceSubClass = 0xff,
562 .bInterfaceProtocol = 0xff,
563 .driver_info = (unsigned long)&qmi_wwan_sierra,
564 }, 379 },
565 380
566 /* Gobi 1000 devices */ 381 /* 3. Combined interface devices matching on interface number */
382 {QMI_FIXED_INTF(0x19d2, 0x0055, 1)}, /* ZTE (Vodafone) K3520-Z */
383 {QMI_FIXED_INTF(0x19d2, 0x0063, 4)}, /* ZTE (Vodafone) K3565-Z */
384 {QMI_FIXED_INTF(0x19d2, 0x0104, 4)}, /* ZTE (Vodafone) K4505-Z */
385 {QMI_FIXED_INTF(0x19d2, 0x0167, 4)}, /* ZTE MF820D */
386 {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
387 {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
388 {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
389 {QMI_FIXED_INTF(0x19d2, 0x1018, 3)}, /* ZTE (Vodafone) K5006-Z */
390 {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
391 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
392 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
393 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
394 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
395 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
396 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
397
398 /* 4. Gobi 1000 devices */
567 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 399 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
568 {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ 400 {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
569 {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ 401 {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
@@ -579,9 +411,11 @@ static const struct usb_device_id products[] = {
579 {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ 411 {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
580 {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ 412 {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
581 413
582 /* Gobi 2000 and 3000 devices */ 414 /* 5. Gobi 2000 and 3000 devices */
583 {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ 415 {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
416 {QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
584 {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ 417 {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
418 {QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
585 {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ 419 {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
586 {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ 420 {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
587 {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ 421 {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
@@ -589,6 +423,8 @@ static const struct usb_device_id products[] = {
589 {QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */ 423 {QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
590 {QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */ 424 {QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
591 {QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */ 425 {QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
426 {QMI_GOBI_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
427 {QMI_GOBI_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */
592 {QMI_GOBI_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 428 {QMI_GOBI_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
593 {QMI_GOBI_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 429 {QMI_GOBI_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
594 {QMI_GOBI_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 430 {QMI_GOBI_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
@@ -600,11 +436,16 @@ static const struct usb_device_id products[] = {
600 {QMI_GOBI_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 436 {QMI_GOBI_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
601 {QMI_GOBI_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 437 {QMI_GOBI_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
602 {QMI_GOBI_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */ 438 {QMI_GOBI_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
439 {QMI_FIXED_INTF(0x1199, 0x9011, 5)}, /* alternate interface number!? */
603 {QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ 440 {QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
604 {QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ 441 {QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
605 {QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ 442 {QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
606 {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ 443 {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
607 {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ 444 {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
445 {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */
446 {QMI_GOBI_DEVICE(0x12d1, 0x14f1)}, /* Sony Gobi 3000 Composite */
447 {QMI_GOBI_DEVICE(0x1410, 0xa021)}, /* Foxconn Gobi 3000 Modem device (Novatel E396) */
448
608 { } /* END */ 449 { } /* END */
609}; 450};
610MODULE_DEVICE_TABLE(usb, products); 451MODULE_DEVICE_TABLE(usb, products);
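
The qmi_wwan rework above drops the per-device qmi_wwan_force_intN copies and whitelist bitmaps in favour of USB_DEVICE_INTERFACE_NUMBER matching, so a single qmi_wwan_shared driver_info serves every device and the table entry itself names the QMI interface. The mechanics of such a table-entry macro, reduced to a self-contained example (invented struct and field names; the two IDs are taken from the table above):

#include <stdio.h>

struct id_entry {
	unsigned short vendor;
	unsigned short product;
	unsigned char  intf;		/* interface number the entry binds to */
};

/* One table entry keyed on (vendor, product, interface), mirroring how
 * QMI_FIXED_INTF() wraps USB_DEVICE_INTERFACE_NUMBER(). */
#define FIXED_INTF(vend, prod, num) \
	{ .vendor = (vend), .product = (prod), .intf = (num) }

static const struct id_entry products[] = {
	FIXED_INTF(0x19d2, 0x1402, 2),	/* ZTE MF60, QMI on interface 2 */
	FIXED_INTF(0x1199, 0x68a2, 8),	/* Sierra Wireless MC7710, QMI on interface 8 */
	{ }				/* terminating entry */
};

int main(void)
{
	const struct id_entry *e;

	for (e = products; e->vendor; e++)
		printf("%04x:%04x -> interface %u\n",
		       e->vendor, e->product, (unsigned)e->intf);
	return 0;
}
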
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index d75d1f56becf..7be49ea60b6d 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -68,15 +68,8 @@ static atomic_t iface_counter = ATOMIC_INIT(0);
68 */ 68 */
69#define SIERRA_NET_USBCTL_BUF_LEN 1024 69#define SIERRA_NET_USBCTL_BUF_LEN 1024
70 70
71/* list of interface numbers - used for constructing interface lists */
72struct sierra_net_iface_info {
73 const u32 infolen; /* number of interface numbers on list */
74 const u8 *ifaceinfo; /* pointer to the array holding the numbers */
75};
76
77struct sierra_net_info_data { 71struct sierra_net_info_data {
78 u16 rx_urb_size; 72 u16 rx_urb_size;
79 struct sierra_net_iface_info whitelist;
80}; 73};
81 74
82/* Private data structure */ 75/* Private data structure */
@@ -637,21 +630,6 @@ static int sierra_net_change_mtu(struct net_device *net, int new_mtu)
637 return usbnet_change_mtu(net, new_mtu); 630 return usbnet_change_mtu(net, new_mtu);
638} 631}
639 632
640static int is_whitelisted(const u8 ifnum,
641 const struct sierra_net_iface_info *whitelist)
642{
643 if (whitelist) {
644 const u8 *list = whitelist->ifaceinfo;
645 int i;
646
647 for (i = 0; i < whitelist->infolen; i++) {
648 if (list[i] == ifnum)
649 return 1;
650 }
651 }
652 return 0;
653}
654
655static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) 633static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
656{ 634{
657 int result = 0; 635 int result = 0;
@@ -706,11 +684,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
706 dev_dbg(&dev->udev->dev, "%s", __func__); 684 dev_dbg(&dev->udev->dev, "%s", __func__);
707 685
708 ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; 686 ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
709 /* We only accept certain interfaces */
710 if (!is_whitelisted(ifacenum, &data->whitelist)) {
711 dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum);
712 return -ENODEV;
713 }
714 numendpoints = intf->cur_altsetting->desc.bNumEndpoints; 687 numendpoints = intf->cur_altsetting->desc.bNumEndpoints;
715 /* We have three endpoints, bulk in and out, and a status */ 688 /* We have three endpoints, bulk in and out, and a status */
716 if (numendpoints != 3) { 689 if (numendpoints != 3) {
@@ -945,13 +918,8 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
945 return NULL; 918 return NULL;
946} 919}
947 920
948static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
949static const struct sierra_net_info_data sierra_net_info_data_direct_ip = { 921static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
950 .rx_urb_size = 8 * 1024, 922 .rx_urb_size = 8 * 1024,
951 .whitelist = {
952 .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
953 .ifaceinfo = sierra_net_ifnum_list
954 }
955}; 923};
956 924
957static const struct driver_info sierra_net_info_direct_ip = { 925static const struct driver_info sierra_net_info_direct_ip = {
@@ -965,15 +933,19 @@ static const struct driver_info sierra_net_info_direct_ip = {
965 .data = (unsigned long)&sierra_net_info_data_direct_ip, 933 .data = (unsigned long)&sierra_net_info_data_direct_ip,
966}; 934};
967 935
936#define DIRECT_IP_DEVICE(vend, prod) \
937 {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \
938 .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \
939 {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 10), \
940 .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \
941 {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 11), \
942 .driver_info = (unsigned long)&sierra_net_info_direct_ip}
943
968static const struct usb_device_id products[] = { 944static const struct usb_device_id products[] = {
969 {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ 945 DIRECT_IP_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
970 .driver_info = (unsigned long) &sierra_net_info_direct_ip}, 946 DIRECT_IP_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
971 {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ 947 DIRECT_IP_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
972 .driver_info = (unsigned long) &sierra_net_info_direct_ip}, 948 DIRECT_IP_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
973 {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
974 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
975 {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
976 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
977 949
978 {}, /* last item */ 950 {}, /* last item */
979}; 951};
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8531c1caac28..fd4b26d46fd5 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1573,7 +1573,7 @@ int usbnet_resume (struct usb_interface *intf)
1573 netif_device_present(dev->net) && 1573 netif_device_present(dev->net) &&
1574 !timer_pending(&dev->delay) && 1574 !timer_pending(&dev->delay) &&
1575 !test_bit(EVENT_RX_HALT, &dev->flags)) 1575 !test_bit(EVENT_RX_HALT, &dev->flags))
1576 rx_alloc_submit(dev, GFP_KERNEL); 1576 rx_alloc_submit(dev, GFP_NOIO);
1577 1577
1578 if (!(dev->txq.qlen >= TX_QLEN(dev))) 1578 if (!(dev->txq.qlen >= TX_QLEN(dev)))
1579 netif_tx_wake_all_queues(dev->net); 1579 netif_tx_wake_all_queues(dev->net);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 93e0cfb739b8..ce9d4f2c9776 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3019,6 +3019,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3019 netdev->watchdog_timeo = 5 * HZ; 3019 netdev->watchdog_timeo = 5 * HZ;
3020 3020
3021 INIT_WORK(&adapter->work, vmxnet3_reset_work); 3021 INIT_WORK(&adapter->work, vmxnet3_reset_work);
3022 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3022 3023
3023 if (adapter->intr.type == VMXNET3_IT_MSIX) { 3024 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3024 int i; 3025 int i;
@@ -3043,7 +3044,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3043 goto err_register; 3044 goto err_register;
3044 } 3045 }
3045 3046
3046 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3047 vmxnet3_check_link(adapter, false); 3047 vmxnet3_check_link(adapter, false);
3048 atomic_inc(&devices_found); 3048 atomic_inc(&devices_found);
3049 return 0; 3049 return 0;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 9eb6479306d6..ef36cafd44b7 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -774,14 +774,15 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev,
774 } 774 }
775 /* Global interrupt queue */ 775 /* Global interrupt queue */
776 writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1); 776 writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
777
778 rc = -ENOMEM;
779
777 priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev, 780 priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev,
778 IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma); 781 IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma);
779 if (!priv->iqcfg) 782 if (!priv->iqcfg)
780 goto err_free_irq_5; 783 goto err_free_irq_5;
781 writel(priv->iqcfg_dma, ioaddr + IQCFG); 784 writel(priv->iqcfg_dma, ioaddr + IQCFG);
782 785
783 rc = -ENOMEM;
784
785 /* 786 /*
786 * SCC 0-3 private rx/tx irq structures 787 * SCC 0-3 private rx/tx irq structures
787 * IQRX/TXi needs to be set soon. Learned it the hard way... 788 * IQRX/TXi needs to be set soon. Learned it the hard way...
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 283237f6f074..def12b38cbf7 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -326,8 +326,10 @@ int i2400m_barker_db_init(const char *_options)
326 unsigned barker; 326 unsigned barker;
327 327
328 options_orig = kstrdup(_options, GFP_KERNEL); 328 options_orig = kstrdup(_options, GFP_KERNEL);
329 if (options_orig == NULL) 329 if (options_orig == NULL) {
330 result = -ENOMEM;
330 goto error_parse; 331 goto error_parse;
332 }
331 options = options_orig; 333 options = options_orig;
332 334
333 while ((token = strsep(&options, ",")) != NULL) { 335 while ((token = strsep(&options, ",")) != NULL) {
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index efc162e0b511..88b8d64c90f1 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -342,7 +342,7 @@ static int at76_dfu_get_status(struct usb_device *udev,
342 return ret; 342 return ret;
343} 343}
344 344
345static u8 at76_dfu_get_state(struct usb_device *udev, u8 *state) 345static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
346{ 346{
347 int ret; 347 int ret;
348 348
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 8c4c040a47b8..2aab20ee9f38 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2056,9 +2056,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
2056void 2056void
2057ath5k_beacon_config(struct ath5k_hw *ah) 2057ath5k_beacon_config(struct ath5k_hw *ah)
2058{ 2058{
2059 unsigned long flags; 2059 spin_lock_bh(&ah->block);
2060
2061 spin_lock_irqsave(&ah->block, flags);
2062 ah->bmisscount = 0; 2060 ah->bmisscount = 0;
2063 ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); 2061 ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
2064 2062
@@ -2085,7 +2083,7 @@ ath5k_beacon_config(struct ath5k_hw *ah)
2085 2083
2086 ath5k_hw_set_imr(ah, ah->imask); 2084 ath5k_hw_set_imr(ah, ah->imask);
2087 mmiowb(); 2085 mmiowb();
2088 spin_unlock_irqrestore(&ah->block, flags); 2086 spin_unlock_bh(&ah->block);
2089} 2087}
2090 2088
2091static void ath5k_tasklet_beacon(unsigned long data) 2089static void ath5k_tasklet_beacon(unsigned long data)
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 4026c906cc7b..b7e0258887e7 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
1482 case AR5K_EEPROM_MODE_11A: 1482 case AR5K_EEPROM_MODE_11A:
1483 offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version); 1483 offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
1484 rate_pcal_info = ee->ee_rate_tpwr_a; 1484 rate_pcal_info = ee->ee_rate_tpwr_a;
1485 ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN; 1485 ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
1486 break; 1486 break;
1487 case AR5K_EEPROM_MODE_11B: 1487 case AR5K_EEPROM_MODE_11B:
1488 offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version); 1488 offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index dc2bcfeadeb4..94a9bbea6874 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -182,6 +182,7 @@
182#define AR5K_EEPROM_EEP_DELTA 10 182#define AR5K_EEPROM_EEP_DELTA 10
183#define AR5K_EEPROM_N_MODES 3 183#define AR5K_EEPROM_N_MODES 3
184#define AR5K_EEPROM_N_5GHZ_CHAN 10 184#define AR5K_EEPROM_N_5GHZ_CHAN 10
185#define AR5K_EEPROM_N_5GHZ_RATE_CHAN 8
185#define AR5K_EEPROM_N_2GHZ_CHAN 3 186#define AR5K_EEPROM_N_2GHZ_CHAN 3
186#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4 187#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4
187#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4 188#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 260e7dc7f751..d56453e43d7e 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -254,7 +254,6 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
254 struct ath5k_vif *avf = (void *)vif->drv_priv; 254 struct ath5k_vif *avf = (void *)vif->drv_priv;
255 struct ath5k_hw *ah = hw->priv; 255 struct ath5k_hw *ah = hw->priv;
256 struct ath_common *common = ath5k_hw_common(ah); 256 struct ath_common *common = ath5k_hw_common(ah);
257 unsigned long flags;
258 257
259 mutex_lock(&ah->lock); 258 mutex_lock(&ah->lock);
260 259
@@ -300,9 +299,9 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
300 } 299 }
301 300
302 if (changes & BSS_CHANGED_BEACON) { 301 if (changes & BSS_CHANGED_BEACON) {
303 spin_lock_irqsave(&ah->block, flags); 302 spin_lock_bh(&ah->block);
304 ath5k_beacon_update(hw, vif); 303 ath5k_beacon_update(hw, vif);
305 spin_unlock_irqrestore(&ah->block, flags); 304 spin_unlock_bh(&ah->block);
306 } 305 }
307 306
308 if (changes & BSS_CHANGED_BEACON_ENABLED) 307 if (changes & BSS_CHANGED_BEACON_ENABLED)
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 7990cd55599c..b42be910a83d 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -773,15 +773,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
773} 773}
774EXPORT_SYMBOL(ath9k_hw_intrpend); 774EXPORT_SYMBOL(ath9k_hw_intrpend);
775 775
776void ath9k_hw_disable_interrupts(struct ath_hw *ah) 776void ath9k_hw_kill_interrupts(struct ath_hw *ah)
777{ 777{
778 struct ath_common *common = ath9k_hw_common(ah); 778 struct ath_common *common = ath9k_hw_common(ah);
779 779
780 if (!(ah->imask & ATH9K_INT_GLOBAL))
781 atomic_set(&ah->intr_ref_cnt, -1);
782 else
783 atomic_dec(&ah->intr_ref_cnt);
784
785 ath_dbg(common, INTERRUPT, "disable IER\n"); 780 ath_dbg(common, INTERRUPT, "disable IER\n");
786 REG_WRITE(ah, AR_IER, AR_IER_DISABLE); 781 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
787 (void) REG_READ(ah, AR_IER); 782 (void) REG_READ(ah, AR_IER);
@@ -793,6 +788,17 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah)
793 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE); 788 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
794 } 789 }
795} 790}
791EXPORT_SYMBOL(ath9k_hw_kill_interrupts);
792
793void ath9k_hw_disable_interrupts(struct ath_hw *ah)
794{
795 if (!(ah->imask & ATH9K_INT_GLOBAL))
796 atomic_set(&ah->intr_ref_cnt, -1);
797 else
798 atomic_dec(&ah->intr_ref_cnt);
799
800 ath9k_hw_kill_interrupts(ah);
801}
796EXPORT_SYMBOL(ath9k_hw_disable_interrupts); 802EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
797 803
798void ath9k_hw_enable_interrupts(struct ath_hw *ah) 804void ath9k_hw_enable_interrupts(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 0eba36dca6f8..4a745e68dd94 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -738,6 +738,7 @@ bool ath9k_hw_intrpend(struct ath_hw *ah);
738void ath9k_hw_set_interrupts(struct ath_hw *ah); 738void ath9k_hw_set_interrupts(struct ath_hw *ah);
739void ath9k_hw_enable_interrupts(struct ath_hw *ah); 739void ath9k_hw_enable_interrupts(struct ath_hw *ah);
740void ath9k_hw_disable_interrupts(struct ath_hw *ah); 740void ath9k_hw_disable_interrupts(struct ath_hw *ah);
741void ath9k_hw_kill_interrupts(struct ath_hw *ah);
741 742
742void ar9002_hw_attach_mac_ops(struct ath_hw *ah); 743void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
743 744
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6049d8b82855..a22df749b8db 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -462,8 +462,10 @@ irqreturn_t ath_isr(int irq, void *dev)
462 if (!ath9k_hw_intrpend(ah)) 462 if (!ath9k_hw_intrpend(ah))
463 return IRQ_NONE; 463 return IRQ_NONE;
464 464
465 if(test_bit(SC_OP_HW_RESET, &sc->sc_flags)) 465 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
466 ath9k_hw_kill_interrupts(ah);
466 return IRQ_HANDLED; 467 return IRQ_HANDLED;
468 }
467 469
468 /* 470 /*
469 * Figure out the reason(s) for the interrupt. Note 471 * Figure out the reason(s) for the interrupt. Note
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index d455de9162ec..a978984d78a5 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -321,6 +321,7 @@ static int ath_pci_suspend(struct device *device)
321 * Otherwise the chip never moved to full sleep, 321 * Otherwise the chip never moved to full sleep,
322 * when no interface is up. 322 * when no interface is up.
323 */ 323 */
324 ath9k_stop_btcoex(sc);
324 ath9k_hw_disable(sc->sc_ah); 325 ath9k_hw_disable(sc->sc_ah);
325 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); 326 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
326 327
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 12aca02228c2..4480c0cc655f 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1044,7 +1044,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1044 struct ieee80211_hw *hw = sc->hw; 1044 struct ieee80211_hw *hw = sc->hw;
1045 struct ieee80211_hdr *hdr; 1045 struct ieee80211_hdr *hdr;
1046 int retval; 1046 int retval;
1047 bool decrypt_error = false;
1048 struct ath_rx_status rs; 1047 struct ath_rx_status rs;
1049 enum ath9k_rx_qtype qtype; 1048 enum ath9k_rx_qtype qtype;
1050 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 1049 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
@@ -1066,6 +1065,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1066 tsf_lower = tsf & 0xffffffff; 1065 tsf_lower = tsf & 0xffffffff;
1067 1066
1068 do { 1067 do {
1068 bool decrypt_error = false;
1069 /* If handling rx interrupt and flush is in progress => exit */ 1069 /* If handling rx interrupt and flush is in progress => exit */
1070 if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0)) 1070 if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
1071 break; 1071 break;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 192ad5c1fcc8..a5edebeb0b4f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1233,6 +1233,9 @@ uint brcms_reset(struct brcms_info *wl)
1233 /* dpc will not be rescheduled */ 1233 /* dpc will not be rescheduled */
1234 wl->resched = false; 1234 wl->resched = false;
1235 1235
1236 /* inform publicly that interface is down */
1237 wl->pub->up = false;
1238
1236 return 0; 1239 return 0;
1237} 1240}
1238 1241
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 95aa8e1683ec..83324b321652 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
2042 return; 2042 return;
2043 } 2043 }
2044 len = ETH_ALEN; 2044 len = ETH_ALEN;
2045 ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len); 2045 ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid,
2046 &len);
2046 if (ret) { 2047 if (ret) {
2047 IPW_DEBUG_INFO("failed querying ordinals at line %d\n", 2048 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
2048 __LINE__); 2049 __LINE__);
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 46782f1102ac..a47b306b522c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
124 const struct fw_img *img; 124 const struct fw_img *img;
125 size_t bufsz; 125 size_t bufsz;
126 126
127 if (!iwl_is_ready_rf(priv))
128 return -EAGAIN;
129
127 /* default is to dump the entire data segment */ 130 /* default is to dump the entire data segment */
128 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { 131 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
129 priv->dbgfs_sram_offset = 0x800000; 132 priv->dbgfs_sram_offset = 0x800000;
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index d9694c58208c..4ffc18dc3a57 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -350,7 +350,7 @@ int iwl_queue_space(const struct iwl_queue *q);
350/***************************************************** 350/*****************************************************
351* Error handling 351* Error handling
352******************************************************/ 352******************************************************/
353int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display); 353int iwl_dump_fh(struct iwl_trans *trans, char **buf);
354void iwl_dump_csr(struct iwl_trans *trans); 354void iwl_dump_csr(struct iwl_trans *trans);
355 355
356/***************************************************** 356/*****************************************************
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 39a6ca1f009c..d1a61ba6247a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -555,7 +555,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
555 } 555 }
556 556
557 iwl_dump_csr(trans); 557 iwl_dump_csr(trans);
558 iwl_dump_fh(trans, NULL, false); 558 iwl_dump_fh(trans, NULL);
559 559
560 iwl_op_mode_nic_error(trans->op_mode); 560 iwl_op_mode_nic_error(trans->op_mode);
561} 561}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 939c2f78df58..1e86ea2266d4 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd)
1649#undef IWL_CMD 1649#undef IWL_CMD
1650} 1650}
1651 1651
1652int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) 1652int iwl_dump_fh(struct iwl_trans *trans, char **buf)
1653{ 1653{
1654 int i; 1654 int i;
1655#ifdef CONFIG_IWLWIFI_DEBUG
1656 int pos = 0;
1657 size_t bufsz = 0;
1658#endif
1659 static const u32 fh_tbl[] = { 1655 static const u32 fh_tbl[] = {
1660 FH_RSCSR_CHNL0_STTS_WPTR_REG, 1656 FH_RSCSR_CHNL0_STTS_WPTR_REG,
1661 FH_RSCSR_CHNL0_RBDCB_BASE_REG, 1657 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
@@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
1667 FH_TSSR_TX_STATUS_REG, 1663 FH_TSSR_TX_STATUS_REG,
1668 FH_TSSR_TX_ERROR_REG 1664 FH_TSSR_TX_ERROR_REG
1669 }; 1665 };
1670#ifdef CONFIG_IWLWIFI_DEBUG 1666
1671 if (display) { 1667#ifdef CONFIG_IWLWIFI_DEBUGFS
1672 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; 1668 if (buf) {
1669 int pos = 0;
1670 size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
1671
1673 *buf = kmalloc(bufsz, GFP_KERNEL); 1672 *buf = kmalloc(bufsz, GFP_KERNEL);
1674 if (!*buf) 1673 if (!*buf)
1675 return -ENOMEM; 1674 return -ENOMEM;
1675
1676 pos += scnprintf(*buf + pos, bufsz - pos, 1676 pos += scnprintf(*buf + pos, bufsz - pos,
1677 "FH register values:\n"); 1677 "FH register values:\n");
1678 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { 1678
1679 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
1679 pos += scnprintf(*buf + pos, bufsz - pos, 1680 pos += scnprintf(*buf + pos, bufsz - pos,
1680 " %34s: 0X%08x\n", 1681 " %34s: 0X%08x\n",
1681 get_fh_string(fh_tbl[i]), 1682 get_fh_string(fh_tbl[i]),
1682 iwl_read_direct32(trans, fh_tbl[i])); 1683 iwl_read_direct32(trans, fh_tbl[i]));
1683 } 1684
1684 return pos; 1685 return pos;
1685 } 1686 }
1686#endif 1687#endif
1688
1687 IWL_ERR(trans, "FH register values:\n"); 1689 IWL_ERR(trans, "FH register values:\n");
1688 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { 1690 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
1689 IWL_ERR(trans, " %34s: 0X%08x\n", 1691 IWL_ERR(trans, " %34s: 0X%08x\n",
1690 get_fh_string(fh_tbl[i]), 1692 get_fh_string(fh_tbl[i]),
1691 iwl_read_direct32(trans, fh_tbl[i])); 1693 iwl_read_direct32(trans, fh_tbl[i]));
1692 } 1694
1693 return 0; 1695 return 0;
1694} 1696}
1695 1697
@@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1982 size_t count, loff_t *ppos) 1984 size_t count, loff_t *ppos)
1983{ 1985{
1984 struct iwl_trans *trans = file->private_data; 1986 struct iwl_trans *trans = file->private_data;
1985 char *buf; 1987 char *buf = NULL;
1986 int pos = 0; 1988 int pos = 0;
1987 ssize_t ret = -EFAULT; 1989 ssize_t ret = -EFAULT;
1988 1990
1989 ret = pos = iwl_dump_fh(trans, &buf, true); 1991 ret = pos = iwl_dump_fh(trans, &buf);
1990 if (buf) { 1992 if (buf) {
1991 ret = simple_read_from_buffer(user_buf, 1993 ret = simple_read_from_buffer(user_buf,
1992 count, ppos, buf, pos); 1994 count, ppos, buf, pos);
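(A short sketch of how the simplified iwl_dump_fh() is meant to be called after this change, based on the two call sites visible in the diff; the error handling shown here is illustrative, and the assumption that the debugfs caller frees the returned buffer is not spelled out in this hunk.)

	char *buf = NULL;
	int pos;

	/* debugfs path: request a formatted snapshot in a freshly
	 * kmalloc'd buffer; returns the text length or -ENOMEM */
	pos = iwl_dump_fh(trans, &buf);
	if (pos > 0 && buf) {
		/* hand buf to simple_read_from_buffer(), then free it */
		kfree(buf);
	}

	/* error path: no buffer wanted, the registers go to the log
	 * through IWL_ERR() and the function returns 0 */
	iwl_dump_fh(trans, NULL);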
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 7f207b6e9552..effb044a8a9d 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -42,7 +42,7 @@ MODULE_FIRMWARE("isl3887usb");
42 * whenever you add a new device. 42 * whenever you add a new device.
43 */ 43 */
44 44
45static struct usb_device_id p54u_table[] __devinitdata = { 45static struct usb_device_id p54u_table[] = {
46 /* Version 1 devices (pci chip + net2280) */ 46 /* Version 1 devices (pci chip + net2280) */
47 {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */ 47 {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */
48 {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ 48 {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 241162e8111d..7a4ae9ee1c63 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1803,6 +1803,7 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
1803 struct cfg80211_pmksa *pmksa, 1803 struct cfg80211_pmksa *pmksa,
1804 int max_pmkids) 1804 int max_pmkids)
1805{ 1805{
1806 struct ndis_80211_pmkid *new_pmkids;
1806 int i, err, newlen; 1807 int i, err, newlen;
1807 unsigned int count; 1808 unsigned int count;
1808 1809
@@ -1833,11 +1834,12 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
1833 /* add new pmkid */ 1834 /* add new pmkid */
1834 newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]); 1835 newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]);
1835 1836
1836 pmkids = krealloc(pmkids, newlen, GFP_KERNEL); 1837 new_pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
1837 if (!pmkids) { 1838 if (!new_pmkids) {
1838 err = -ENOMEM; 1839 err = -ENOMEM;
1839 goto error; 1840 goto error;
1840 } 1841 }
1842 pmkids = new_pmkids;
1841 1843
1842 pmkids->length = cpu_to_le32(newlen); 1844 pmkids->length = cpu_to_le32(newlen);
1843 pmkids->bssid_info_count = cpu_to_le32(count + 1); 1845 pmkids->bssid_info_count = cpu_to_le32(count + 1);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 71a30b026089..533024095c43 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -44,7 +44,7 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
44MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver"); 44MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver");
45MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
46 46
47static struct usb_device_id rtl8187_table[] __devinitdata = { 47static struct usb_device_id rtl8187_table[] = {
48 /* Asus */ 48 /* Asus */
49 {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187}, 49 {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187},
50 /* Belkin */ 50 /* Belkin */
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 30899901aef5..650f79a1f2bd 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -57,8 +57,7 @@
57static const struct ethtool_ops xennet_ethtool_ops; 57static const struct ethtool_ops xennet_ethtool_ops;
58 58
59struct netfront_cb { 59struct netfront_cb {
60 struct page *page; 60 int pull_to;
61 unsigned offset;
62}; 61};
63 62
64#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) 63#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
@@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev,
867 struct sk_buff *skb; 866 struct sk_buff *skb;
868 867
869 while ((skb = __skb_dequeue(rxq)) != NULL) { 868 while ((skb = __skb_dequeue(rxq)) != NULL) {
870 struct page *page = NETFRONT_SKB_CB(skb)->page; 869 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
871 void *vaddr = page_address(page);
872 unsigned offset = NETFRONT_SKB_CB(skb)->offset;
873
874 memcpy(skb->data, vaddr + offset,
875 skb_headlen(skb));
876 870
877 if (page != skb_frag_page(&skb_shinfo(skb)->frags[0])) 871 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
878 __free_page(page);
879 872
880 /* Ethernet work: Delayed to here as it peeks the header. */ 873 /* Ethernet work: Delayed to here as it peeks the header. */
881 skb->protocol = eth_type_trans(skb, dev); 874 skb->protocol = eth_type_trans(skb, dev);
@@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
913 struct sk_buff_head errq; 906 struct sk_buff_head errq;
914 struct sk_buff_head tmpq; 907 struct sk_buff_head tmpq;
915 unsigned long flags; 908 unsigned long flags;
916 unsigned int len;
917 int err; 909 int err;
918 910
919 spin_lock(&np->rx_lock); 911 spin_lock(&np->rx_lock);
@@ -955,24 +947,13 @@ err:
955 } 947 }
956 } 948 }
957 949
958 NETFRONT_SKB_CB(skb)->page = 950 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
959 skb_frag_page(&skb_shinfo(skb)->frags[0]); 951 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
960 NETFRONT_SKB_CB(skb)->offset = rx->offset; 952 NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
961
962 len = rx->status;
963 if (len > RX_COPY_THRESHOLD)
964 len = RX_COPY_THRESHOLD;
965 skb_put(skb, len);
966 953
967 if (rx->status > len) { 954 skb_shinfo(skb)->frags[0].page_offset = rx->offset;
968 skb_shinfo(skb)->frags[0].page_offset = 955 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
969 rx->offset + len; 956 skb->data_len = rx->status;
970 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
971 skb->data_len = rx->status - len;
972 } else {
973 __skb_fill_page_desc(skb, 0, NULL, 0, 0);
974 skb_shinfo(skb)->nr_frags = 0;
975 }
976 957
977 i = xennet_fill_frags(np, skb, &tmpq); 958 i = xennet_fill_frags(np, skb, &tmpq);
978 959
@@ -999,7 +980,7 @@ err:
999 * receive throughout using the standard receive 980 * receive throughout using the standard receive
1000 * buffer size was cut by 25%(!!!). 981 * buffer size was cut by 25%(!!!).
1001 */ 982 */
1002 skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); 983 skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
1003 skb->len += skb->data_len; 984 skb->len += skb->data_len;
1004 985
1005 if (rx->flags & XEN_NETRXF_csum_blank) 986 if (rx->flags & XEN_NETRXF_csum_blank)
diff --git a/drivers/of/base.c b/drivers/of/base.c
index c181b94abc36..d4a1c9a043e1 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -364,6 +364,33 @@ struct device_node *of_get_next_child(const struct device_node *node,
364EXPORT_SYMBOL(of_get_next_child); 364EXPORT_SYMBOL(of_get_next_child);
365 365
366/** 366/**
367 * of_get_next_available_child - Find the next available child node
368 * @node: parent node
369 * @prev: previous child of the parent node, or NULL to get first
370 *
371 * This function is like of_get_next_child(), except that it
372 * automatically skips any disabled nodes (i.e. status = "disabled").
373 */
374struct device_node *of_get_next_available_child(const struct device_node *node,
375 struct device_node *prev)
376{
377 struct device_node *next;
378
379 read_lock(&devtree_lock);
380 next = prev ? prev->sibling : node->child;
381 for (; next; next = next->sibling) {
382 if (!of_device_is_available(next))
383 continue;
384 if (of_node_get(next))
385 break;
386 }
387 of_node_put(prev);
388 read_unlock(&devtree_lock);
389 return next;
390}
391EXPORT_SYMBOL(of_get_next_available_child);
392
393/**
367 * of_find_node_by_path - Find a node matching a full OF path 394 * of_find_node_by_path - Find a node matching a full OF path
368 * @path: The full path to match 395 * @path: The full path to match
369 * 396 *
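(A usage sketch for the of_get_next_available_child() helper added above; the parent node and the per-child handler are placeholders, not part of this patch. The helper takes a reference on the child it returns and drops the reference on the previous one, so a loop that runs to completion needs no extra of_node_put(); only an early exit has to release the current child.)

	struct device_node *child = NULL;

	while ((child = of_get_next_available_child(parent, child)) != NULL) {
		/* child has a status property other than "disabled" (or none) */
		if (setup_child(child) < 0) {	/* hypothetical per-child handler */
			of_node_put(child);	/* leaving early: drop the reference */
			break;
		}
	}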
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index fbf7b26c7c8a..c5792d622dc4 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -266,8 +266,8 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
266 } 266 }
267 267
268 if (!error) 268 if (!error)
269 dev_printk(KERN_INFO, &dev->dev, 269 dev_info(&dev->dev, "power state changed by ACPI to %s\n",
270 "power state changed by ACPI to D%d\n", state); 270 pci_power_name(state));
271 271
272 return error; 272 return error;
273} 273}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 185be3703343..5270f1a99328 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -959,6 +959,13 @@ static int pci_pm_poweroff_noirq(struct device *dev)
959 if (!pci_dev->state_saved && !pci_is_bridge(pci_dev)) 959 if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
960 pci_prepare_to_sleep(pci_dev); 960 pci_prepare_to_sleep(pci_dev);
961 961
962 /*
963 * The reason for doing this here is the same as for the analogous code
964 * in pci_pm_suspend_noirq().
965 */
966 if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
967 pci_write_config_word(pci_dev, PCI_COMMAND, 0);
968
962 return 0; 969 return 0;
963} 970}
964 971
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index fb7f3bebdc69..dc5c126e398a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -657,11 +657,7 @@ static struct pinctrl *pinctrl_get_locked(struct device *dev)
657 if (p != NULL) 657 if (p != NULL)
658 return ERR_PTR(-EBUSY); 658 return ERR_PTR(-EBUSY);
659 659
660 p = create_pinctrl(dev); 660 return create_pinctrl(dev);
661 if (IS_ERR(p))
662 return p;
663
664 return p;
665} 661}
666 662
667/** 663/**
@@ -738,11 +734,8 @@ static struct pinctrl_state *pinctrl_lookup_state_locked(struct pinctrl *p,
738 dev_dbg(p->dev, "using pinctrl dummy state (%s)\n", 734 dev_dbg(p->dev, "using pinctrl dummy state (%s)\n",
739 name); 735 name);
740 state = create_state(p, name); 736 state = create_state(p, name);
741 if (IS_ERR(state)) 737 } else
742 return state; 738 state = ERR_PTR(-ENODEV);
743 } else {
744 return ERR_PTR(-ENODEV);
745 }
746 } 739 }
747 740
748 return state; 741 return state;
diff --git a/drivers/pinctrl/pinctrl-imx51.c b/drivers/pinctrl/pinctrl-imx51.c
index 689b3c88dd2e..9fd02162a3c2 100644
--- a/drivers/pinctrl/pinctrl-imx51.c
+++ b/drivers/pinctrl/pinctrl-imx51.c
@@ -974,7 +974,7 @@ static struct imx_pin_reg imx51_pin_regs[] = {
974 IMX_PIN_REG(MX51_PAD_EIM_DA13, NO_PAD, 0x050, 0, 0x000, 0), /* MX51_PAD_EIM_DA13__EIM_DA13 */ 974 IMX_PIN_REG(MX51_PAD_EIM_DA13, NO_PAD, 0x050, 0, 0x000, 0), /* MX51_PAD_EIM_DA13__EIM_DA13 */
975 IMX_PIN_REG(MX51_PAD_EIM_DA14, NO_PAD, 0x054, 0, 0x000, 0), /* MX51_PAD_EIM_DA14__EIM_DA14 */ 975 IMX_PIN_REG(MX51_PAD_EIM_DA14, NO_PAD, 0x054, 0, 0x000, 0), /* MX51_PAD_EIM_DA14__EIM_DA14 */
976 IMX_PIN_REG(MX51_PAD_EIM_DA15, NO_PAD, 0x058, 0, 0x000, 0), /* MX51_PAD_EIM_DA15__EIM_DA15 */ 976 IMX_PIN_REG(MX51_PAD_EIM_DA15, NO_PAD, 0x058, 0, 0x000, 0), /* MX51_PAD_EIM_DA15__EIM_DA15 */
977 IMX_PIN_REG(MX51_PAD_SD2_CMD, NO_PAD, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */ 977 IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */
978 IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 1, 0x9b0, 2), /* MX51_PAD_SD2_CMD__I2C1_SCL */ 978 IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 1, 0x9b0, 2), /* MX51_PAD_SD2_CMD__I2C1_SCL */
979 IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 0, 0x000, 0), /* MX51_PAD_SD2_CMD__SD2_CMD */ 979 IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 0, 0x000, 0), /* MX51_PAD_SD2_CMD__SD2_CMD */
980 IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 2, 0x914, 3), /* MX51_PAD_SD2_CLK__CSPI_SCLK */ 980 IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 2, 0x914, 3), /* MX51_PAD_SD2_CLK__CSPI_SCLK */
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/pinctrl-nomadik-db8500.c
index 5f3e9d0221e1..a39fb7a6fc51 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/pinctrl-nomadik-db8500.c
@@ -505,6 +505,8 @@ static const unsigned kp_b_1_pins[] = { DB8500_PIN_F3, DB8500_PIN_F1,
505 DB8500_PIN_J3, DB8500_PIN_H2, DB8500_PIN_J2, DB8500_PIN_H1, 505 DB8500_PIN_J3, DB8500_PIN_H2, DB8500_PIN_J2, DB8500_PIN_H1,
506 DB8500_PIN_F4, DB8500_PIN_E3, DB8500_PIN_E4, DB8500_PIN_D2, 506 DB8500_PIN_F4, DB8500_PIN_E3, DB8500_PIN_E4, DB8500_PIN_D2,
507 DB8500_PIN_C1, DB8500_PIN_D3, DB8500_PIN_C2, DB8500_PIN_D5 }; 507 DB8500_PIN_C1, DB8500_PIN_D3, DB8500_PIN_C2, DB8500_PIN_D5 };
508static const unsigned kp_b_2_pins[] = { DB8500_PIN_F3, DB8500_PIN_F1,
509 DB8500_PIN_G3, DB8500_PIN_G2, DB8500_PIN_F4, DB8500_PIN_E3};
508static const unsigned sm_b_1_pins[] = { DB8500_PIN_C6, DB8500_PIN_B3, 510static const unsigned sm_b_1_pins[] = { DB8500_PIN_C6, DB8500_PIN_B3,
509 DB8500_PIN_C4, DB8500_PIN_E6, DB8500_PIN_A3, DB8500_PIN_B6, 511 DB8500_PIN_C4, DB8500_PIN_E6, DB8500_PIN_A3, DB8500_PIN_B6,
510 DB8500_PIN_D6, DB8500_PIN_B7, DB8500_PIN_D7, DB8500_PIN_D8, 512 DB8500_PIN_D6, DB8500_PIN_B7, DB8500_PIN_D7, DB8500_PIN_D8,
@@ -662,6 +664,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
662 DB8500_PIN_GROUP(spi3_b_1, NMK_GPIO_ALT_B), 664 DB8500_PIN_GROUP(spi3_b_1, NMK_GPIO_ALT_B),
663 DB8500_PIN_GROUP(msp1txrx_b_1, NMK_GPIO_ALT_B), 665 DB8500_PIN_GROUP(msp1txrx_b_1, NMK_GPIO_ALT_B),
664 DB8500_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B), 666 DB8500_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B),
667 DB8500_PIN_GROUP(kp_b_2, NMK_GPIO_ALT_B),
665 DB8500_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B), 668 DB8500_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B),
666 DB8500_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B), 669 DB8500_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B),
667 DB8500_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B), 670 DB8500_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B),
@@ -751,7 +754,7 @@ DB8500_FUNC_GROUPS(msp1, "msp1txrx_a_1", "msp1_a_1", "msp1txrx_b_1");
751DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1"); 754DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1");
752DB8500_FUNC_GROUPS(lcd, "lcdvsi0_a_1", "lcdvsi1_a_1", "lcd_d0_d7_a_1", 755DB8500_FUNC_GROUPS(lcd, "lcdvsi0_a_1", "lcdvsi1_a_1", "lcd_d0_d7_a_1",
753 "lcd_d8_d11_a_1", "lcd_d12_d23_a_1", "lcd_b_1"); 756 "lcd_d8_d11_a_1", "lcd_d12_d23_a_1", "lcd_b_1");
754DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_c_1", "kp_oc1_1"); 757DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_b_2", "kp_c_1", "kp_oc1_1");
755DB8500_FUNC_GROUPS(mc2, "mc2_a_1", "mc2rstn_c_1"); 758DB8500_FUNC_GROUPS(mc2, "mc2_a_1", "mc2rstn_c_1");
756DB8500_FUNC_GROUPS(ssp1, "ssp1_a_1"); 759DB8500_FUNC_GROUPS(ssp1, "ssp1_a_1");
757DB8500_FUNC_GROUPS(ssp0, "ssp0_a_1"); 760DB8500_FUNC_GROUPS(ssp0, "ssp0_a_1");
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index ec6ac501b23a..3dde6537adb8 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -1292,7 +1292,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
1292 NOMADIK_GPIO_TO_IRQ(pdata->first_gpio), 1292 NOMADIK_GPIO_TO_IRQ(pdata->first_gpio),
1293 0, &nmk_gpio_irq_simple_ops, nmk_chip); 1293 0, &nmk_gpio_irq_simple_ops, nmk_chip);
1294 if (!nmk_chip->domain) { 1294 if (!nmk_chip->domain) {
1295 pr_err("%s: Failed to create irqdomain\n", np->full_name); 1295 dev_err(&dev->dev, "failed to create irqdomain\n");
1296 ret = -ENOSYS; 1296 ret = -ENOSYS;
1297 goto out; 1297 goto out;
1298 } 1298 }
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 2a262f5c5c0c..c86bae828c28 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -289,6 +289,7 @@ config IDEAPAD_LAPTOP
289 tristate "Lenovo IdeaPad Laptop Extras" 289 tristate "Lenovo IdeaPad Laptop Extras"
290 depends on ACPI 290 depends on ACPI
291 depends on RFKILL && INPUT 291 depends on RFKILL && INPUT
292 depends on SERIO_I8042
292 select INPUT_SPARSEKMAP 293 select INPUT_SPARSEKMAP
293 help 294 help
294 This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. 295 This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
@@ -758,8 +759,11 @@ config SAMSUNG_Q10
758 759
759config APPLE_GMUX 760config APPLE_GMUX
760 tristate "Apple Gmux Driver" 761 tristate "Apple Gmux Driver"
762 depends on ACPI
761 depends on PNP 763 depends on PNP
762 select BACKLIGHT_CLASS_DEVICE 764 depends on BACKLIGHT_CLASS_DEVICE
765 depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE
766 depends on ACPI_VIDEO=n || ACPI_VIDEO
763 ---help--- 767 ---help---
764 This driver provides support for the gmux device found on many 768 This driver provides support for the gmux device found on many
765 Apple laptops, which controls the display mux for the hybrid 769 Apple laptops, which controls the display mux for the hybrid
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 905fa01ac8df..dfb1a92ce949 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -2,6 +2,7 @@
2 * Gmux driver for Apple laptops 2 * Gmux driver for Apple laptops
3 * 3 *
4 * Copyright (C) Canonical Ltd. <seth.forshee@canonical.com> 4 * Copyright (C) Canonical Ltd. <seth.forshee@canonical.com>
5 * Copyright (C) 2010-2012 Andreas Heider <andreas@meetr.de>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -18,16 +19,30 @@
18#include <linux/pnp.h> 19#include <linux/pnp.h>
19#include <linux/apple_bl.h> 20#include <linux/apple_bl.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/pci.h>
24#include <linux/vga_switcheroo.h>
21#include <acpi/video.h> 25#include <acpi/video.h>
22#include <asm/io.h> 26#include <asm/io.h>
23 27
24struct apple_gmux_data { 28struct apple_gmux_data {
25 unsigned long iostart; 29 unsigned long iostart;
26 unsigned long iolen; 30 unsigned long iolen;
31 bool indexed;
32 struct mutex index_lock;
27 33
28 struct backlight_device *bdev; 34 struct backlight_device *bdev;
35
36 /* switcheroo data */
37 acpi_handle dhandle;
38 int gpe;
39 enum vga_switcheroo_client_id resume_client_id;
40 enum vga_switcheroo_state power_state;
41 struct completion powerchange_done;
29}; 42};
30 43
44static struct apple_gmux_data *apple_gmux_data;
45
31/* 46/*
32 * gmux port offsets. Many of these are not yet used, but may be in the 47 * gmux port offsets. Many of these are not yet used, but may be in the
33 * future, and it's useful to have them documented here anyhow. 48 * future, and it's useful to have them documented here anyhow.
@@ -45,6 +60,9 @@ struct apple_gmux_data {
45#define GMUX_PORT_DISCRETE_POWER 0x50 60#define GMUX_PORT_DISCRETE_POWER 0x50
46#define GMUX_PORT_MAX_BRIGHTNESS 0x70 61#define GMUX_PORT_MAX_BRIGHTNESS 0x70
47#define GMUX_PORT_BRIGHTNESS 0x74 62#define GMUX_PORT_BRIGHTNESS 0x74
63#define GMUX_PORT_VALUE 0xc2
64#define GMUX_PORT_READ 0xd0
65#define GMUX_PORT_WRITE 0xd4
48 66
49#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4) 67#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4)
50 68
@@ -59,22 +77,172 @@ struct apple_gmux_data {
59#define GMUX_BRIGHTNESS_MASK 0x00ffffff 77#define GMUX_BRIGHTNESS_MASK 0x00ffffff
60#define GMUX_MAX_BRIGHTNESS GMUX_BRIGHTNESS_MASK 78#define GMUX_MAX_BRIGHTNESS GMUX_BRIGHTNESS_MASK
61 79
62static inline u8 gmux_read8(struct apple_gmux_data *gmux_data, int port) 80static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port)
63{ 81{
64 return inb(gmux_data->iostart + port); 82 return inb(gmux_data->iostart + port);
65} 83}
66 84
67static inline void gmux_write8(struct apple_gmux_data *gmux_data, int port, 85static void gmux_pio_write8(struct apple_gmux_data *gmux_data, int port,
68 u8 val) 86 u8 val)
69{ 87{
70 outb(val, gmux_data->iostart + port); 88 outb(val, gmux_data->iostart + port);
71} 89}
72 90
73static inline u32 gmux_read32(struct apple_gmux_data *gmux_data, int port) 91static u32 gmux_pio_read32(struct apple_gmux_data *gmux_data, int port)
74{ 92{
75 return inl(gmux_data->iostart + port); 93 return inl(gmux_data->iostart + port);
76} 94}
77 95
96static void gmux_pio_write32(struct apple_gmux_data *gmux_data, int port,
97 u32 val)
98{
99 int i;
100 u8 tmpval;
101
102 for (i = 0; i < 4; i++) {
103 tmpval = (val >> (i * 8)) & 0xff;
104 outb(tmpval, gmux_data->iostart + port + i);
105 }
106}
107
108static int gmux_index_wait_ready(struct apple_gmux_data *gmux_data)
109{
110 int i = 200;
111 u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
112
113 while (i && (gwr & 0x01)) {
114 inb(gmux_data->iostart + GMUX_PORT_READ);
115 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
116 udelay(100);
117 i--;
118 }
119
120 return !!i;
121}
122
123static int gmux_index_wait_complete(struct apple_gmux_data *gmux_data)
124{
125 int i = 200;
126 u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
127
128 while (i && !(gwr & 0x01)) {
129 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
130 udelay(100);
131 i--;
132 }
133
134 if (gwr & 0x01)
135 inb(gmux_data->iostart + GMUX_PORT_READ);
136
137 return !!i;
138}
139
140static u8 gmux_index_read8(struct apple_gmux_data *gmux_data, int port)
141{
142 u8 val;
143
144 mutex_lock(&gmux_data->index_lock);
145 outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ);
146 gmux_index_wait_ready(gmux_data);
147 val = inb(gmux_data->iostart + GMUX_PORT_VALUE);
148 mutex_unlock(&gmux_data->index_lock);
149
150 return val;
151}
152
153static void gmux_index_write8(struct apple_gmux_data *gmux_data, int port,
154 u8 val)
155{
156 mutex_lock(&gmux_data->index_lock);
157 outb(val, gmux_data->iostart + GMUX_PORT_VALUE);
158 gmux_index_wait_ready(gmux_data);
159 outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE);
160 gmux_index_wait_complete(gmux_data);
161 mutex_unlock(&gmux_data->index_lock);
162}
163
164static u32 gmux_index_read32(struct apple_gmux_data *gmux_data, int port)
165{
166 u32 val;
167
168 mutex_lock(&gmux_data->index_lock);
169 outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ);
170 gmux_index_wait_ready(gmux_data);
171 val = inl(gmux_data->iostart + GMUX_PORT_VALUE);
172 mutex_unlock(&gmux_data->index_lock);
173
174 return val;
175}
176
177static void gmux_index_write32(struct apple_gmux_data *gmux_data, int port,
178 u32 val)
179{
180 int i;
181 u8 tmpval;
182
183 mutex_lock(&gmux_data->index_lock);
184
185 for (i = 0; i < 4; i++) {
186 tmpval = (val >> (i * 8)) & 0xff;
187 outb(tmpval, gmux_data->iostart + GMUX_PORT_VALUE + i);
188 }
189
190 gmux_index_wait_ready(gmux_data);
191 outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE);
192 gmux_index_wait_complete(gmux_data);
193 mutex_unlock(&gmux_data->index_lock);
194}
195
196static u8 gmux_read8(struct apple_gmux_data *gmux_data, int port)
197{
198 if (gmux_data->indexed)
199 return gmux_index_read8(gmux_data, port);
200 else
201 return gmux_pio_read8(gmux_data, port);
202}
203
204static void gmux_write8(struct apple_gmux_data *gmux_data, int port, u8 val)
205{
206 if (gmux_data->indexed)
207 gmux_index_write8(gmux_data, port, val);
208 else
209 gmux_pio_write8(gmux_data, port, val);
210}
211
212static u32 gmux_read32(struct apple_gmux_data *gmux_data, int port)
213{
214 if (gmux_data->indexed)
215 return gmux_index_read32(gmux_data, port);
216 else
217 return gmux_pio_read32(gmux_data, port);
218}
219
220static void gmux_write32(struct apple_gmux_data *gmux_data, int port,
221 u32 val)
222{
223 if (gmux_data->indexed)
224 gmux_index_write32(gmux_data, port, val);
225 else
226 gmux_pio_write32(gmux_data, port, val);
227}
228
229static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
230{
231 u16 val;
232
233 outb(0xaa, gmux_data->iostart + 0xcc);
234 outb(0x55, gmux_data->iostart + 0xcd);
235 outb(0x00, gmux_data->iostart + 0xce);
236
237 val = inb(gmux_data->iostart + 0xcc) |
238 (inb(gmux_data->iostart + 0xcd) << 8);
239
240 if (val == 0x55aa)
241 return true;
242
243 return false;
244}
245
78static int gmux_get_brightness(struct backlight_device *bd) 246static int gmux_get_brightness(struct backlight_device *bd)
79{ 247{
80 struct apple_gmux_data *gmux_data = bl_get_data(bd); 248 struct apple_gmux_data *gmux_data = bl_get_data(bd);
@@ -90,16 +258,7 @@ static int gmux_update_status(struct backlight_device *bd)
90 if (bd->props.state & BL_CORE_SUSPENDED) 258 if (bd->props.state & BL_CORE_SUSPENDED)
91 return 0; 259 return 0;
92 260
93 /* 261 gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness);
94 * Older gmux versions require writing out lower bytes first then
95 * setting the upper byte to 0 to flush the values. Newer versions
96 * accept a single u32 write, but the old method also works, so we
97 * just use the old method for all gmux versions.
98 */
99 gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS, brightness);
100 gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 1, brightness >> 8);
101 gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 2, brightness >> 16);
102 gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 3, 0);
103 262
104 return 0; 263 return 0;
105} 264}
@@ -110,6 +269,146 @@ static const struct backlight_ops gmux_bl_ops = {
110 .update_status = gmux_update_status, 269 .update_status = gmux_update_status,
111}; 270};
112 271
272static int gmux_switchto(enum vga_switcheroo_client_id id)
273{
274 if (id == VGA_SWITCHEROO_IGD) {
275 gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1);
276 gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2);
277 gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2);
278 } else {
279 gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2);
280 gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3);
281 gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3);
282 }
283
284 return 0;
285}
286
287static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data,
288 enum vga_switcheroo_state state)
289{
290 INIT_COMPLETION(gmux_data->powerchange_done);
291
292 if (state == VGA_SWITCHEROO_ON) {
293 gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1);
294 gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 3);
295 pr_debug("Discrete card powered up\n");
296 } else {
297 gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1);
298 gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 0);
299 pr_debug("Discrete card powered down\n");
300 }
301
302 gmux_data->power_state = state;
303
304 if (gmux_data->gpe >= 0 &&
305 !wait_for_completion_interruptible_timeout(&gmux_data->powerchange_done,
306 msecs_to_jiffies(200)))
307 pr_warn("Timeout waiting for gmux switch to complete\n");
308
309 return 0;
310}
311
312static int gmux_set_power_state(enum vga_switcheroo_client_id id,
313 enum vga_switcheroo_state state)
314{
315 if (id == VGA_SWITCHEROO_IGD)
316 return 0;
317
318 return gmux_set_discrete_state(apple_gmux_data, state);
319}
320
321static int gmux_get_client_id(struct pci_dev *pdev)
322{
323 /*
324 * Early Macbook Pros with switchable graphics use nvidia
325 * integrated graphics. Hardcode that the 9400M is integrated.
326 */
327 if (pdev->vendor == PCI_VENDOR_ID_INTEL)
328 return VGA_SWITCHEROO_IGD;
329 else if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
330 pdev->device == 0x0863)
331 return VGA_SWITCHEROO_IGD;
332 else
333 return VGA_SWITCHEROO_DIS;
334}
335
336static enum vga_switcheroo_client_id
337gmux_active_client(struct apple_gmux_data *gmux_data)
338{
339 if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2)
340 return VGA_SWITCHEROO_IGD;
341
342 return VGA_SWITCHEROO_DIS;
343}
344
345static struct vga_switcheroo_handler gmux_handler = {
346 .switchto = gmux_switchto,
347 .power_state = gmux_set_power_state,
348 .get_client_id = gmux_get_client_id,
349};
350
351static inline void gmux_disable_interrupts(struct apple_gmux_data *gmux_data)
352{
353 gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE,
354 GMUX_INTERRUPT_DISABLE);
355}
356
357static inline void gmux_enable_interrupts(struct apple_gmux_data *gmux_data)
358{
359 gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE,
360 GMUX_INTERRUPT_ENABLE);
361}
362
363static inline u8 gmux_interrupt_get_status(struct apple_gmux_data *gmux_data)
364{
365 return gmux_read8(gmux_data, GMUX_PORT_INTERRUPT_STATUS);
366}
367
368static void gmux_clear_interrupts(struct apple_gmux_data *gmux_data)
369{
370 u8 status;
371
372 /* to clear interrupts write back current status */
373 status = gmux_interrupt_get_status(gmux_data);
374 gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_STATUS, status);
375}
376
377static void gmux_notify_handler(acpi_handle device, u32 value, void *context)
378{
379 u8 status;
380 struct pnp_dev *pnp = (struct pnp_dev *)context;
381 struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
382
383 status = gmux_interrupt_get_status(gmux_data);
384 gmux_disable_interrupts(gmux_data);
385 pr_debug("Notify handler called: status %d\n", status);
386
387 gmux_clear_interrupts(gmux_data);
388 gmux_enable_interrupts(gmux_data);
389
390 if (status & GMUX_INTERRUPT_STATUS_POWER)
391 complete(&gmux_data->powerchange_done);
392}
393
394static int gmux_suspend(struct pnp_dev *pnp, pm_message_t state)
395{
396 struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
397 gmux_data->resume_client_id = gmux_active_client(gmux_data);
398 gmux_disable_interrupts(gmux_data);
399 return 0;
400}
401
402static int gmux_resume(struct pnp_dev *pnp)
403{
404 struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
405 gmux_enable_interrupts(gmux_data);
406 gmux_switchto(gmux_data->resume_client_id);
407 if (gmux_data->power_state == VGA_SWITCHEROO_OFF)
408 gmux_set_discrete_state(gmux_data, gmux_data->power_state);
409 return 0;
410}
411
113static int __devinit gmux_probe(struct pnp_dev *pnp, 412static int __devinit gmux_probe(struct pnp_dev *pnp,
114 const struct pnp_device_id *id) 413 const struct pnp_device_id *id)
115{ 414{
@@ -119,6 +418,11 @@ static int __devinit gmux_probe(struct pnp_dev *pnp,
119 struct backlight_device *bdev; 418 struct backlight_device *bdev;
120 u8 ver_major, ver_minor, ver_release; 419 u8 ver_major, ver_minor, ver_release;
121 int ret = -ENXIO; 420 int ret = -ENXIO;
421 acpi_status status;
422 unsigned long long gpe;
423
424 if (apple_gmux_data)
425 return -EBUSY;
122 426
123 gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL); 427 gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL);
124 if (!gmux_data) 428 if (!gmux_data)
@@ -147,22 +451,29 @@ static int __devinit gmux_probe(struct pnp_dev *pnp,
147 } 451 }
148 452
149 /* 453 /*
150 * On some machines the gmux is in ACPI even though the machine 454 * Invalid version information may indicate either that the gmux
151 * doesn't really have a gmux. Check for invalid version information 455 * device isn't present or that it's a new one that uses indexed
152 * to detect this. 456 * io
153 */ 457 */
458
154 ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR); 459 ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
155 ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR); 460 ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
156 ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE); 461 ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
157 if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) { 462 if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
158 pr_info("gmux device not present\n"); 463 if (gmux_is_indexed(gmux_data)) {
159 ret = -ENODEV; 464 mutex_init(&gmux_data->index_lock);
160 goto err_release; 465 gmux_data->indexed = true;
466 } else {
467 pr_info("gmux device not present\n");
468 ret = -ENODEV;
469 goto err_release;
470 }
471 pr_info("Found indexed gmux\n");
472 } else {
473 pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor,
474 ver_release);
161 } 475 }
162 476
163 pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor,
164 ver_release);
165
166 memset(&props, 0, sizeof(props)); 477 memset(&props, 0, sizeof(props));
167 props.type = BACKLIGHT_PLATFORM; 478 props.type = BACKLIGHT_PLATFORM;
168 props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS); 479 props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
@@ -194,13 +505,67 @@ static int __devinit gmux_probe(struct pnp_dev *pnp,
194 * Disable the other backlight choices. 505 * Disable the other backlight choices.
195 */ 506 */
196 acpi_video_dmi_promote_vendor(); 507 acpi_video_dmi_promote_vendor();
197#ifdef CONFIG_ACPI_VIDEO 508#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE)
198 acpi_video_unregister(); 509 acpi_video_unregister();
199#endif 510#endif
200 apple_bl_unregister(); 511 apple_bl_unregister();
201 512
513 gmux_data->power_state = VGA_SWITCHEROO_ON;
514
515 gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev);
516 if (!gmux_data->dhandle) {
517 pr_err("Cannot find acpi handle for pnp device %s\n",
518 dev_name(&pnp->dev));
519 ret = -ENODEV;
520 goto err_notify;
521 }
522
523 status = acpi_evaluate_integer(gmux_data->dhandle, "GMGP", NULL, &gpe);
524 if (ACPI_SUCCESS(status)) {
525 gmux_data->gpe = (int)gpe;
526
527 status = acpi_install_notify_handler(gmux_data->dhandle,
528 ACPI_DEVICE_NOTIFY,
529 &gmux_notify_handler, pnp);
530 if (ACPI_FAILURE(status)) {
531 pr_err("Install notify handler failed: %s\n",
532 acpi_format_exception(status));
533 ret = -ENODEV;
534 goto err_notify;
535 }
536
537 status = acpi_enable_gpe(NULL, gmux_data->gpe);
538 if (ACPI_FAILURE(status)) {
539 pr_err("Cannot enable gpe: %s\n",
540 acpi_format_exception(status));
541 goto err_enable_gpe;
542 }
543 } else {
544 pr_warn("No GPE found for gmux\n");
545 gmux_data->gpe = -1;
546 }
547
548 if (vga_switcheroo_register_handler(&gmux_handler)) {
549 ret = -ENODEV;
550 goto err_register_handler;
551 }
552
553 init_completion(&gmux_data->powerchange_done);
554 apple_gmux_data = gmux_data;
555 gmux_enable_interrupts(gmux_data);
556
202 return 0; 557 return 0;
203 558
559err_register_handler:
560 if (gmux_data->gpe >= 0)
561 acpi_disable_gpe(NULL, gmux_data->gpe);
562err_enable_gpe:
563 if (gmux_data->gpe >= 0)
564 acpi_remove_notify_handler(gmux_data->dhandle,
565 ACPI_DEVICE_NOTIFY,
566 &gmux_notify_handler);
567err_notify:
568 backlight_device_unregister(bdev);
204err_release: 569err_release:
205 release_region(gmux_data->iostart, gmux_data->iolen); 570 release_region(gmux_data->iostart, gmux_data->iolen);
206err_free: 571err_free:
@@ -212,12 +577,23 @@ static void __devexit gmux_remove(struct pnp_dev *pnp)
212{ 577{
213 struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); 578 struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
214 579
580 vga_switcheroo_unregister_handler();
581 gmux_disable_interrupts(gmux_data);
582 if (gmux_data->gpe >= 0) {
583 acpi_disable_gpe(NULL, gmux_data->gpe);
584 acpi_remove_notify_handler(gmux_data->dhandle,
585 ACPI_DEVICE_NOTIFY,
586 &gmux_notify_handler);
587 }
588
215 backlight_device_unregister(gmux_data->bdev); 589 backlight_device_unregister(gmux_data->bdev);
590
216 release_region(gmux_data->iostart, gmux_data->iolen); 591 release_region(gmux_data->iostart, gmux_data->iolen);
592 apple_gmux_data = NULL;
217 kfree(gmux_data); 593 kfree(gmux_data);
218 594
219 acpi_video_dmi_demote_vendor(); 595 acpi_video_dmi_demote_vendor();
220#ifdef CONFIG_ACPI_VIDEO 596#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE)
221 acpi_video_register(); 597 acpi_video_register();
222#endif 598#endif
223 apple_bl_register(); 599 apple_bl_register();
@@ -233,6 +609,8 @@ static struct pnp_driver gmux_pnp_driver = {
233 .probe = gmux_probe, 609 .probe = gmux_probe,
234 .remove = __devexit_p(gmux_remove), 610 .remove = __devexit_p(gmux_remove),
235 .id_table = gmux_device_ids, 611 .id_table = gmux_device_ids,
612 .suspend = gmux_suspend,
613 .resume = gmux_resume
236}; 614};
237 615
238static int __init apple_gmux_init(void) 616static int __init apple_gmux_init(void)
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index c7a36f6b0580..2eb9fe8e8efd 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -101,6 +101,7 @@ MODULE_LICENSE("GPL");
101#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 101#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002
102#define ASUS_WMI_DEVID_CWAP 0x00010003 102#define ASUS_WMI_DEVID_CWAP 0x00010003
103#define ASUS_WMI_DEVID_WLAN 0x00010011 103#define ASUS_WMI_DEVID_WLAN 0x00010011
104#define ASUS_WMI_DEVID_WLAN_LED 0x00010012
104#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 105#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013
105#define ASUS_WMI_DEVID_GPS 0x00010015 106#define ASUS_WMI_DEVID_GPS 0x00010015
106#define ASUS_WMI_DEVID_WIMAX 0x00010017 107#define ASUS_WMI_DEVID_WIMAX 0x00010017
@@ -731,8 +732,21 @@ static int asus_rfkill_set(void *data, bool blocked)
731{ 732{
732 struct asus_rfkill *priv = data; 733 struct asus_rfkill *priv = data;
733 u32 ctrl_param = !blocked; 734 u32 ctrl_param = !blocked;
735 u32 dev_id = priv->dev_id;
734 736
735 return asus_wmi_set_devstate(priv->dev_id, ctrl_param, NULL); 737 /*
738 * If the user bit is set, the BIOS can't set and record the wlan status;
739 * it will report the value read from id ASUS_WMI_DEVID_WLAN_LED
740 * while we query the wlan status through WMI (ASUS_WMI_DEVID_WLAN).
741 * So we have to record the wlan status in id ASUS_WMI_DEVID_WLAN_LED
742 * while setting the wlan status through WMI.
743 * This also matches what the Windows app does.
744 */
745 if ((dev_id == ASUS_WMI_DEVID_WLAN) &&
746 priv->asus->driver->wlan_ctrl_by_user)
747 dev_id = ASUS_WMI_DEVID_WLAN_LED;
748
749 return asus_wmi_set_devstate(dev_id, ctrl_param, NULL);
736} 750}
737 751
738static void asus_rfkill_query(struct rfkill *rfkill, void *data) 752static void asus_rfkill_query(struct rfkill *rfkill, void *data)
@@ -1653,6 +1667,7 @@ static int asus_wmi_add(struct platform_device *pdev)
1653 struct asus_wmi *asus; 1667 struct asus_wmi *asus;
1654 acpi_status status; 1668 acpi_status status;
1655 int err; 1669 int err;
1670 u32 result;
1656 1671
1657 asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL); 1672 asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL);
1658 if (!asus) 1673 if (!asus)
@@ -1711,6 +1726,10 @@ static int asus_wmi_add(struct platform_device *pdev)
1711 if (err) 1726 if (err)
1712 goto fail_debugfs; 1727 goto fail_debugfs;
1713 1728
1729 asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
1730 if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
1731 asus->driver->wlan_ctrl_by_user = 1;
1732
1714 return 0; 1733 return 0;
1715 1734
1716fail_debugfs: 1735fail_debugfs:
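
Reviewer note: the wlan hunk above simply redirects the write to a different WMI device id when the firmware leaves wlan control to the user. A minimal standalone sketch of that decision, reusing the two id constants from the header hunk; the helper name is hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ASUS_WMI_DEVID_WLAN	0x00010011
#define ASUS_WMI_DEVID_WLAN_LED	0x00010012

/* Hypothetical helper: pick the id to write when toggling wlan.  When the
 * user bit is set the BIOS only records state under the WLAN LED id, so
 * writes have to go there to stay consistent with later reads. */
static uint32_t wlan_write_devid(uint32_t dev_id, bool ctrl_by_user)
{
	if (dev_id == ASUS_WMI_DEVID_WLAN && ctrl_by_user)
		return ASUS_WMI_DEVID_WLAN_LED;
	return dev_id;
}

int main(void)
{
	printf("0x%08x\n", (unsigned)wlan_write_devid(ASUS_WMI_DEVID_WLAN, true));
	return 0;
}
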
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 9c1da8b81bea..4c9bd38bb0a2 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -46,6 +46,7 @@ struct quirk_entry {
46struct asus_wmi_driver { 46struct asus_wmi_driver {
47 int brightness; 47 int brightness;
48 int panel_power; 48 int panel_power;
49 int wlan_ctrl_by_user;
49 50
50 const char *name; 51 const char *name;
51 struct module *owner; 52 struct module *owner;
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index cd33add118ce..c87ff16873f9 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -725,8 +725,10 @@ static void cmpc_tablet_handler(struct acpi_device *dev, u32 event)
725 struct input_dev *inputdev = dev_get_drvdata(&dev->dev); 725 struct input_dev *inputdev = dev_get_drvdata(&dev->dev);
726 726
727 if (event == 0x81) { 727 if (event == 0x81) {
728 if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) 728 if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) {
729 input_report_switch(inputdev, SW_TABLET_MODE, !val); 729 input_report_switch(inputdev, SW_TABLET_MODE, !val);
730 input_sync(inputdev);
731 }
730 } 732 }
731} 733}
732 734
@@ -739,8 +741,10 @@ static void cmpc_tablet_idev_init(struct input_dev *inputdev)
739 set_bit(SW_TABLET_MODE, inputdev->swbit); 741 set_bit(SW_TABLET_MODE, inputdev->swbit);
740 742
741 acpi = to_acpi_device(inputdev->dev.parent); 743 acpi = to_acpi_device(inputdev->dev.parent);
742 if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) 744 if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) {
743 input_report_switch(inputdev, SW_TABLET_MODE, !val); 745 input_report_switch(inputdev, SW_TABLET_MODE, !val);
746 input_sync(inputdev);
747 }
744} 748}
745 749
746static int cmpc_tablet_add(struct acpi_device *acpi) 750static int cmpc_tablet_add(struct acpi_device *acpi)
@@ -760,8 +764,10 @@ static int cmpc_tablet_resume(struct device *dev)
760 struct input_dev *inputdev = dev_get_drvdata(dev); 764 struct input_dev *inputdev = dev_get_drvdata(dev);
761 765
762 unsigned long long val = 0; 766 unsigned long long val = 0;
763 if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) 767 if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) {
764 input_report_switch(inputdev, SW_TABLET_MODE, !val); 768 input_report_switch(inputdev, SW_TABLET_MODE, !val);
769 input_sync(inputdev);
770 }
765 return 0; 771 return 0;
766} 772}
767#endif 773#endif
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 4e96e8c0b60f..927c33af67ec 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -211,7 +211,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
211 .ident = "Dell Inspiron 5420", 211 .ident = "Dell Inspiron 5420",
212 .matches = { 212 .matches = {
213 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 213 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
214 DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5420"), 214 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5420"),
215 }, 215 },
216 .driver_data = &quirk_dell_vostro_v130, 216 .driver_data = &quirk_dell_vostro_v130,
217 }, 217 },
@@ -220,7 +220,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
220 .ident = "Dell Inspiron 5520", 220 .ident = "Dell Inspiron 5520",
221 .matches = { 221 .matches = {
222 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 222 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
223 DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5520"), 223 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5520"),
224 }, 224 },
225 .driver_data = &quirk_dell_vostro_v130, 225 .driver_data = &quirk_dell_vostro_v130,
226 }, 226 },
@@ -229,7 +229,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
229 .ident = "Dell Inspiron 5720", 229 .ident = "Dell Inspiron 5720",
230 .matches = { 230 .matches = {
231 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 231 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
232 DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5720"), 232 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5720"),
233 }, 233 },
234 .driver_data = &quirk_dell_vostro_v130, 234 .driver_data = &quirk_dell_vostro_v130,
235 }, 235 },
@@ -238,7 +238,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
238 .ident = "Dell Inspiron 7420", 238 .ident = "Dell Inspiron 7420",
239 .matches = { 239 .matches = {
240 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 240 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
241 DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7420"), 241 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7420"),
242 }, 242 },
243 .driver_data = &quirk_dell_vostro_v130, 243 .driver_data = &quirk_dell_vostro_v130,
244 }, 244 },
@@ -247,7 +247,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
247 .ident = "Dell Inspiron 7520", 247 .ident = "Dell Inspiron 7520",
248 .matches = { 248 .matches = {
249 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 249 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
250 DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7520"), 250 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
251 }, 251 },
252 .driver_data = &quirk_dell_vostro_v130, 252 .driver_data = &quirk_dell_vostro_v130,
253 }, 253 },
@@ -256,7 +256,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
256 .ident = "Dell Inspiron 7720", 256 .ident = "Dell Inspiron 7720",
257 .matches = { 257 .matches = {
258 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 258 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
259 DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7720"), 259 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"),
260 }, 260 },
261 .driver_data = &quirk_dell_vostro_v130, 261 .driver_data = &quirk_dell_vostro_v130,
262 }, 262 },
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 17f6dfd8dbfb..dae7abe1d711 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -36,6 +36,7 @@
36#include <linux/fb.h> 36#include <linux/fb.h>
37#include <linux/debugfs.h> 37#include <linux/debugfs.h>
38#include <linux/seq_file.h> 38#include <linux/seq_file.h>
39#include <linux/i8042.h>
39 40
40#define IDEAPAD_RFKILL_DEV_NUM (3) 41#define IDEAPAD_RFKILL_DEV_NUM (3)
41 42
@@ -63,8 +64,11 @@ enum {
63 VPCCMD_R_3G, 64 VPCCMD_R_3G,
64 VPCCMD_W_3G, 65 VPCCMD_W_3G,
65 VPCCMD_R_ODD, /* 0x21 */ 66 VPCCMD_R_ODD, /* 0x21 */
66 VPCCMD_R_RF = 0x23, 67 VPCCMD_W_FAN,
68 VPCCMD_R_RF,
67 VPCCMD_W_RF, 69 VPCCMD_W_RF,
70 VPCCMD_R_FAN = 0x2B,
71 VPCCMD_R_SPECIAL_BUTTONS = 0x31,
68 VPCCMD_W_BL_POWER = 0x33, 72 VPCCMD_W_BL_POWER = 0x33,
69}; 73};
70 74
@@ -356,14 +360,46 @@ static ssize_t store_ideapad_cam(struct device *dev,
356 return -EINVAL; 360 return -EINVAL;
357 ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state); 361 ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state);
358 if (ret < 0) 362 if (ret < 0)
359 return ret; 363 return -EIO;
360 return count; 364 return count;
361} 365}
362 366
363static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam); 367static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam);
364 368
369static ssize_t show_ideapad_fan(struct device *dev,
370 struct device_attribute *attr,
371 char *buf)
372{
373 unsigned long result;
374
375 if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result))
376 return sprintf(buf, "-1\n");
377 return sprintf(buf, "%lu\n", result);
378}
379
380static ssize_t store_ideapad_fan(struct device *dev,
381 struct device_attribute *attr,
382 const char *buf, size_t count)
383{
384 int ret, state;
385
386 if (!count)
387 return 0;
388 if (sscanf(buf, "%i", &state) != 1)
389 return -EINVAL;
390 if (state < 0 || state > 4 || state == 3)
391 return -EINVAL;
392 ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state);
393 if (ret < 0)
394 return -EIO;
395 return count;
396}
397
398static DEVICE_ATTR(fan_mode, 0644, show_ideapad_fan, store_ideapad_fan);
399
365static struct attribute *ideapad_attributes[] = { 400static struct attribute *ideapad_attributes[] = {
366 &dev_attr_camera_power.attr, 401 &dev_attr_camera_power.attr,
402 &dev_attr_fan_mode.attr,
367 NULL 403 NULL
368}; 404};
369 405
@@ -377,7 +413,10 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
377 413
378 if (attr == &dev_attr_camera_power.attr) 414 if (attr == &dev_attr_camera_power.attr)
379 supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg)); 415 supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
380 else 416 else if (attr == &dev_attr_fan_mode.attr) {
417 unsigned long value;
418 supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value);
419 } else
381 supported = true; 420 supported = true;
382 421
383 return supported ? attr->mode : 0; 422 return supported ? attr->mode : 0;
@@ -518,9 +557,15 @@ static void ideapad_platform_exit(struct ideapad_private *priv)
518 */ 557 */
519static const struct key_entry ideapad_keymap[] = { 558static const struct key_entry ideapad_keymap[] = {
520 { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } }, 559 { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } },
560 { KE_KEY, 7, { KEY_CAMERA } },
561 { KE_KEY, 11, { KEY_F16 } },
521 { KE_KEY, 13, { KEY_WLAN } }, 562 { KE_KEY, 13, { KEY_WLAN } },
522 { KE_KEY, 16, { KEY_PROG1 } }, 563 { KE_KEY, 16, { KEY_PROG1 } },
523 { KE_KEY, 17, { KEY_PROG2 } }, 564 { KE_KEY, 17, { KEY_PROG2 } },
565 { KE_KEY, 64, { KEY_PROG3 } },
566 { KE_KEY, 65, { KEY_PROG4 } },
567 { KE_KEY, 66, { KEY_TOUCHPAD_OFF } },
568 { KE_KEY, 67, { KEY_TOUCHPAD_ON } },
524 { KE_END, 0 }, 569 { KE_END, 0 },
525}; 570};
526 571
@@ -587,6 +632,28 @@ static void ideapad_input_novokey(struct ideapad_private *priv)
587 ideapad_input_report(priv, 16); 632 ideapad_input_report(priv, 16);
588} 633}
589 634
635static void ideapad_check_special_buttons(struct ideapad_private *priv)
636{
637 unsigned long bit, value;
638
639 read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
640
641 for (bit = 0; bit < 16; bit++) {
642 if (test_bit(bit, &value)) {
643 switch (bit) {
644 case 6:
645 /* Thermal Management button */
646 ideapad_input_report(priv, 65);
647 break;
648 case 1:
649 /* OneKey Theater button */
650 ideapad_input_report(priv, 64);
651 break;
652 }
653 }
654 }
655}
656
590/* 657/*
591 * backlight 658 * backlight
592 */ 659 */
@@ -691,6 +758,24 @@ static const struct acpi_device_id ideapad_device_ids[] = {
691}; 758};
692MODULE_DEVICE_TABLE(acpi, ideapad_device_ids); 759MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
693 760
761static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
762{
763 struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
764 unsigned long value;
765
766 /* Without reading from EC touchpad LED doesn't switch state */
767 if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) {
768 /* Some IdeaPads don't really turn off touchpad - they only
769 * switch the LED state. We (de)activate KBC AUX port to turn
770 * touchpad off and on. We send KEY_TOUCHPAD_OFF and
771 * KEY_TOUCHPAD_ON to not to get out of sync with LED */
772 unsigned char param;
773 i8042_command(&param, value ? I8042_CMD_AUX_ENABLE :
774 I8042_CMD_AUX_DISABLE);
775 ideapad_input_report(priv, value ? 67 : 66);
776 }
777}
778
694static int __devinit ideapad_acpi_add(struct acpi_device *adevice) 779static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
695{ 780{
696 int ret, i; 781 int ret, i;
@@ -727,6 +812,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
727 priv->rfk[i] = NULL; 812 priv->rfk[i] = NULL;
728 } 813 }
729 ideapad_sync_rfk_state(priv); 814 ideapad_sync_rfk_state(priv);
815 ideapad_sync_touchpad_state(adevice);
730 816
731 if (!acpi_video_backlight_support()) { 817 if (!acpi_video_backlight_support()) {
732 ret = ideapad_backlight_init(priv); 818 ret = ideapad_backlight_init(priv);
@@ -785,9 +871,14 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
785 ideapad_sync_rfk_state(priv); 871 ideapad_sync_rfk_state(priv);
786 break; 872 break;
787 case 13: 873 case 13:
874 case 11:
875 case 7:
788 case 6: 876 case 6:
789 ideapad_input_report(priv, vpc_bit); 877 ideapad_input_report(priv, vpc_bit);
790 break; 878 break;
879 case 5:
880 ideapad_sync_touchpad_state(adevice);
881 break;
791 case 4: 882 case 4:
792 ideapad_backlight_notify_brightness(priv); 883 ideapad_backlight_notify_brightness(priv);
793 break; 884 break;
@@ -797,6 +888,9 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
797 case 2: 888 case 2:
798 ideapad_backlight_notify_power(priv); 889 ideapad_backlight_notify_power(priv);
799 break; 890 break;
891 case 0:
892 ideapad_check_special_buttons(priv);
893 break;
800 default: 894 default:
801 pr_info("Unknown event: %lu\n", vpc_bit); 895 pr_info("Unknown event: %lu\n", vpc_bit);
802 } 896 }
@@ -804,6 +898,15 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
804 } 898 }
805} 899}
806 900
901static int ideapad_acpi_resume(struct device *device)
902{
903 ideapad_sync_rfk_state(ideapad_priv);
904 ideapad_sync_touchpad_state(to_acpi_device(device));
905 return 0;
906}
907
908static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume);
909
807static struct acpi_driver ideapad_acpi_driver = { 910static struct acpi_driver ideapad_acpi_driver = {
808 .name = "ideapad_acpi", 911 .name = "ideapad_acpi",
809 .class = "IdeaPad", 912 .class = "IdeaPad",
@@ -811,6 +914,7 @@ static struct acpi_driver ideapad_acpi_driver = {
811 .ops.add = ideapad_acpi_add, 914 .ops.add = ideapad_acpi_add,
812 .ops.remove = ideapad_acpi_remove, 915 .ops.remove = ideapad_acpi_remove,
813 .ops.notify = ideapad_acpi_notify, 916 .ops.notify = ideapad_acpi_notify,
917 .drv.pm = &ideapad_pm,
814 .owner = THIS_MODULE, 918 .owner = THIS_MODULE,
815}; 919};
816 920
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index f28f36ccdcf4..80e377949314 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8664,6 +8664,13 @@ static int __must_check __init get_thinkpad_model_data(
8664 tp->model_str = kstrdup(s, GFP_KERNEL); 8664 tp->model_str = kstrdup(s, GFP_KERNEL);
8665 if (!tp->model_str) 8665 if (!tp->model_str)
8666 return -ENOMEM; 8666 return -ENOMEM;
8667 } else {
8668 s = dmi_get_system_info(DMI_BIOS_VENDOR);
8669 if (s && !(strnicmp(s, "Lenovo", 6))) {
8670 tp->model_str = kstrdup(s, GFP_KERNEL);
8671 if (!tp->model_str)
8672 return -ENOMEM;
8673 }
8667 } 8674 }
8668 8675
8669 s = dmi_get_system_info(DMI_PRODUCT_NAME); 8676 s = dmi_get_system_info(DMI_PRODUCT_NAME);
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 8fc3808d7a3e..90c5c7357a50 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -1,12 +1,31 @@
1menuconfig PWM 1menuconfig PWM
2 bool "PWM Support" 2 bool "Pulse-Width Modulation (PWM) Support"
3 depends on !MACH_JZ4740 && !PUV3_PWM 3 depends on !MACH_JZ4740 && !PUV3_PWM
4 help 4 help
5 This enables PWM support through the generic PWM framework. 5 Generic Pulse-Width Modulation (PWM) support.
6 You only need to enable this, if you also want to enable 6
7 one or more of the PWM drivers below. 7 In Pulse-Width Modulation, a variation of the width of pulses
8 8 in a rectangular pulse signal is used as a means to alter the
9 If unsure, say N. 9 average power of the signal. Applications include efficient
10 power delivery and voltage regulation. In computer systems,
11 PWMs are commonly used to control fans or the brightness of
12 display backlights.
13
14 This framework provides a generic interface to PWM devices
15 within the Linux kernel. On the driver side it provides an API
16 to register and unregister a PWM chip, an abstraction of a PWM
17 controller, that supports one or more PWM devices. Client
18 drivers can request PWM devices and use the generic framework
19 to configure as well as enable and disable them.
20
21 This generic framework replaces the legacy PWM framework which
22 allows only a single driver implementing the required API. Not
23 all legacy implementations have been ported to the framework
24 yet. The framework provides an API that is backward compatible
25 with the legacy framework so that existing client drivers
26 continue to work as expected.
27
28 If unsure, say no.
10 29
11if PWM 30if PWM
12 31
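
The help text above describes the consumer side of the new framework only in prose. A hedged sketch of what a client driver might do with it, using the pwm_get()/pwm_config()/pwm_enable() calls exported by drivers/pwm/core.c; the "backlight" connection id and the timing values are illustrative assumptions, not something defined by this series:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwm.h>

/* Illustrative consumer: request a PWM by connection id, program a 50%
 * duty cycle and switch it on.  All names and numbers here are examples. */
static int example_pwm_consumer(struct device *dev)
{
	struct pwm_device *pwm;
	int ret;

	pwm = pwm_get(dev, "backlight");	/* resolved via the lookup table */
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	ret = pwm_config(pwm, 500000, 1000000);	/* duty_ns, period_ns */
	if (!ret)
		ret = pwm_enable(pwm);
	if (ret)
		pwm_put(pwm);

	return ret;
}
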
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index ecb76909e946..c6e05078d3ad 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -129,8 +129,8 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
129 return 0; 129 return 0;
130} 130}
131 131
132static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc, 132static struct pwm_device *
133 const struct of_phandle_args *args) 133of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
134{ 134{
135 struct pwm_device *pwm; 135 struct pwm_device *pwm;
136 136
@@ -149,7 +149,7 @@ static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc,
149 return pwm; 149 return pwm;
150} 150}
151 151
152void of_pwmchip_add(struct pwm_chip *chip) 152static void of_pwmchip_add(struct pwm_chip *chip)
153{ 153{
154 if (!chip->dev || !chip->dev->of_node) 154 if (!chip->dev || !chip->dev->of_node)
155 return; 155 return;
@@ -162,7 +162,7 @@ void of_pwmchip_add(struct pwm_chip *chip)
162 of_node_get(chip->dev->of_node); 162 of_node_get(chip->dev->of_node);
163} 163}
164 164
165void of_pwmchip_remove(struct pwm_chip *chip) 165static void of_pwmchip_remove(struct pwm_chip *chip)
166{ 166{
167 if (chip->dev && chip->dev->of_node) 167 if (chip->dev && chip->dev->of_node)
168 of_node_put(chip->dev->of_node); 168 of_node_put(chip->dev->of_node);
@@ -527,7 +527,7 @@ void __init pwm_add_table(struct pwm_lookup *table, size_t num)
527struct pwm_device *pwm_get(struct device *dev, const char *con_id) 527struct pwm_device *pwm_get(struct device *dev, const char *con_id)
528{ 528{
529 struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER); 529 struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER);
530 const char *dev_id = dev ? dev_name(dev): NULL; 530 const char *dev_id = dev ? dev_name(dev) : NULL;
531 struct pwm_chip *chip = NULL; 531 struct pwm_chip *chip = NULL;
532 unsigned int index = 0; 532 unsigned int index = 0;
533 unsigned int best = 0; 533 unsigned int best = 0;
@@ -609,7 +609,7 @@ void pwm_put(struct pwm_device *pwm)
609 mutex_lock(&pwm_lock); 609 mutex_lock(&pwm_lock);
610 610
611 if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) { 611 if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) {
612 pr_warning("PWM device already freed\n"); 612 pr_warn("PWM device already freed\n");
613 goto out; 613 goto out;
614 } 614 }
615 615
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index d10386528c9c..e5187c0ade9f 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -225,6 +225,7 @@ static int s3c_pwm_probe(struct platform_device *pdev)
225 225
226 /* calculate base of control bits in TCON */ 226 /* calculate base of control bits in TCON */
227 s3c->tcon_base = id == 0 ? 0 : (id * 4) + 4; 227 s3c->tcon_base = id == 0 ? 0 : (id * 4) + 4;
228 s3c->chip.dev = &pdev->dev;
228 s3c->chip.ops = &s3c_pwm_ops; 229 s3c->chip.ops = &s3c_pwm_ops;
229 s3c->chip.base = -1; 230 s3c->chip.base = -1;
230 s3c->chip.npwm = 1; 231 s3c->chip.npwm = 1;
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 02ce18d5e49a..057465e0553c 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -187,10 +187,8 @@ static int tegra_pwm_probe(struct platform_device *pdev)
187 } 187 }
188 188
189 pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r); 189 pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
190 if (!pwm->mmio_base) { 190 if (!pwm->mmio_base)
191 dev_err(&pdev->dev, "failed to ioremap() region\n");
192 return -EADDRNOTAVAIL; 191 return -EADDRNOTAVAIL;
193 }
194 192
195 platform_set_drvdata(pdev, pwm); 193 platform_set_drvdata(pdev, pwm);
196 194
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 3c2ad284ee3e..0b66d0f25922 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -192,10 +192,8 @@ static int __devinit ecap_pwm_probe(struct platform_device *pdev)
192 } 192 }
193 193
194 pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r); 194 pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
195 if (!pc->mmio_base) { 195 if (!pc->mmio_base)
196 dev_err(&pdev->dev, "failed to ioremap() registers\n");
197 return -EADDRNOTAVAIL; 196 return -EADDRNOTAVAIL;
198 }
199 197
200 ret = pwmchip_add(&pc->chip); 198 ret = pwmchip_add(&pc->chip);
201 if (ret < 0) { 199 if (ret < 0) {
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 010d232cb0c8..c3756d1be194 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -371,10 +371,8 @@ static int __devinit ehrpwm_pwm_probe(struct platform_device *pdev)
371 } 371 }
372 372
373 pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r); 373 pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
374 if (!pc->mmio_base) { 374 if (!pc->mmio_base)
375 dev_err(&pdev->dev, "failed to ioremap() registers\n");
376 return -EADDRNOTAVAIL; 375 return -EADDRNOTAVAIL;
377 }
378 376
379 ret = pwmchip_add(&pc->chip); 377 ret = pwmchip_add(&pc->chip);
380 if (ret < 0) { 378 if (ret < 0) {
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 548021439f0c..ad14389b7144 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -41,7 +41,7 @@ static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask)
41 cpu_relax(); 41 cpu_relax();
42 42
43 if (unlikely(!loops)) 43 if (unlikely(!loops))
44 pr_warning("Waiting for status bits 0x%x to clear timed out\n", 44 pr_warn("Waiting for status bits 0x%x to clear timed out\n",
45 bitmask); 45 bitmask);
46} 46}
47 47
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 722246cf20ab..5d44252b7342 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -435,6 +435,9 @@ static void tsi721_db_dpc(struct work_struct *work)
435 " info %4.4x\n", DBELL_SID(idb.bytes), 435 " info %4.4x\n", DBELL_SID(idb.bytes),
436 DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); 436 DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
437 } 437 }
438
439 wr_ptr = ioread32(priv->regs +
440 TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
438 } 441 }
439 442
440 iowrite32(rd_ptr & (IDB_QSIZE - 1), 443 iowrite32(rd_ptr & (IDB_QSIZE - 1),
@@ -445,6 +448,10 @@ static void tsi721_db_dpc(struct work_struct *work)
445 regval |= TSI721_SR_CHINT_IDBQRCV; 448 regval |= TSI721_SR_CHINT_IDBQRCV;
446 iowrite32(regval, 449 iowrite32(regval,
447 priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); 450 priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
451
452 wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
453 if (wr_ptr != rd_ptr)
454 schedule_work(&priv->idb_work);
448} 455}
449 456
450/** 457/**
@@ -2212,7 +2219,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2212 const struct pci_device_id *id) 2219 const struct pci_device_id *id)
2213{ 2220{
2214 struct tsi721_device *priv; 2221 struct tsi721_device *priv;
2215 int i, cap; 2222 int cap;
2216 int err; 2223 int err;
2217 u32 regval; 2224 u32 regval;
2218 2225
@@ -2232,12 +2239,15 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2232 priv->pdev = pdev; 2239 priv->pdev = pdev;
2233 2240
2234#ifdef DEBUG 2241#ifdef DEBUG
2242 {
2243 int i;
2235 for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { 2244 for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
2236 dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n", 2245 dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
2237 i, (unsigned long long)pci_resource_start(pdev, i), 2246 i, (unsigned long long)pci_resource_start(pdev, i),
2238 (unsigned long)pci_resource_len(pdev, i), 2247 (unsigned long)pci_resource_len(pdev, i),
2239 pci_resource_flags(pdev, i)); 2248 pci_resource_flags(pdev, i));
2240 } 2249 }
2250 }
2241#endif 2251#endif
2242 /* 2252 /*
2243 * Verify BAR configuration 2253 * Verify BAR configuration
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 182b553059c9..c151fd5d8c97 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -486,6 +486,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
486 .id = AB3100_BUCK, 486 .id = AB3100_BUCK,
487 .ops = &regulator_ops_variable_sleepable, 487 .ops = &regulator_ops_variable_sleepable,
488 .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages), 488 .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages),
489 .volt_table = ldo_e_buck_typ_voltages,
489 .type = REGULATOR_VOLTAGE, 490 .type = REGULATOR_VOLTAGE,
490 .owner = THIS_MODULE, 491 .owner = THIS_MODULE,
491 .enable_time = 1000, 492 .enable_time = 1000,
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index e9c2085f9dfb..ce0fe72a428e 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -64,14 +64,15 @@ static int anatop_set_voltage_sel(struct regulator_dev *reg, unsigned selector)
64static int anatop_get_voltage_sel(struct regulator_dev *reg) 64static int anatop_get_voltage_sel(struct regulator_dev *reg)
65{ 65{
66 struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); 66 struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
67 u32 val; 67 u32 val, mask;
68 68
69 if (!anatop_reg->control_reg) 69 if (!anatop_reg->control_reg)
70 return -ENOTSUPP; 70 return -ENOTSUPP;
71 71
72 val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg); 72 val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg);
73 val = (val & ((1 << anatop_reg->vol_bit_width) - 1)) >> 73 mask = ((1 << anatop_reg->vol_bit_width) - 1) <<
74 anatop_reg->vol_bit_shift; 74 anatop_reg->vol_bit_shift;
75 val = (val & mask) >> anatop_reg->vol_bit_shift;
75 76
76 return val - anatop_reg->min_bit_val; 77 return val - anatop_reg->min_bit_val;
77} 78}
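
The point of the hunk above is that the mask has to cover the field at its actual bit position before the shift; the old code masked the low vol_bit_width bits first and then shifted, clipping the field. A standalone sketch of the corrected mask-then-shift extraction (the shift and width values below are made up):

#include <stdint.h>
#include <stdio.h>

/* Extract a register field given its shift and bit width, mirroring the
 * corrected logic above.  Shift and width are arbitrary example values,
 * not taken from a real anatop regulator. */
static uint32_t field_get(uint32_t reg, unsigned int shift, unsigned int width)
{
	uint32_t mask = ((1u << width) - 1) << shift;

	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t reg = 0x12345678;

	/* field of width 5 starting at bit 9 */
	printf("%u\n", (unsigned)field_get(reg, 9, 5));
	return 0;
}
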
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index f092588a078c..48385318175a 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3217,7 +3217,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
3217 3217
3218 dev_set_drvdata(&rdev->dev, rdev); 3218 dev_set_drvdata(&rdev->dev, rdev);
3219 3219
3220 if (config->ena_gpio) { 3220 if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) {
3221 ret = gpio_request_one(config->ena_gpio, 3221 ret = gpio_request_one(config->ena_gpio,
3222 GPIOF_DIR_OUT | config->ena_gpio_flags, 3222 GPIOF_DIR_OUT | config->ena_gpio_flags,
3223 rdev_get_name(rdev)); 3223 rdev_get_name(rdev));
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 34b67bee9323..8b5944f2d7d1 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -57,16 +57,17 @@ static int gpio_regulator_get_value(struct regulator_dev *dev)
57 return -EINVAL; 57 return -EINVAL;
58} 58}
59 59
60static int gpio_regulator_set_value(struct regulator_dev *dev, 60static int gpio_regulator_set_voltage(struct regulator_dev *dev,
61 int min, int max, unsigned *selector) 61 int min_uV, int max_uV,
62 unsigned *selector)
62{ 63{
63 struct gpio_regulator_data *data = rdev_get_drvdata(dev); 64 struct gpio_regulator_data *data = rdev_get_drvdata(dev);
64 int ptr, target = 0, state, best_val = INT_MAX; 65 int ptr, target = 0, state, best_val = INT_MAX;
65 66
66 for (ptr = 0; ptr < data->nr_states; ptr++) 67 for (ptr = 0; ptr < data->nr_states; ptr++)
67 if (data->states[ptr].value < best_val && 68 if (data->states[ptr].value < best_val &&
68 data->states[ptr].value >= min && 69 data->states[ptr].value >= min_uV &&
69 data->states[ptr].value <= max) { 70 data->states[ptr].value <= max_uV) {
70 target = data->states[ptr].gpios; 71 target = data->states[ptr].gpios;
71 best_val = data->states[ptr].value; 72 best_val = data->states[ptr].value;
72 if (selector) 73 if (selector)
@@ -85,13 +86,6 @@ static int gpio_regulator_set_value(struct regulator_dev *dev,
85 return 0; 86 return 0;
86} 87}
87 88
88static int gpio_regulator_set_voltage(struct regulator_dev *dev,
89 int min_uV, int max_uV,
90 unsigned *selector)
91{
92 return gpio_regulator_set_value(dev, min_uV, max_uV, selector);
93}
94
95static int gpio_regulator_list_voltage(struct regulator_dev *dev, 89static int gpio_regulator_list_voltage(struct regulator_dev *dev,
96 unsigned selector) 90 unsigned selector)
97{ 91{
@@ -106,7 +100,27 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev,
106static int gpio_regulator_set_current_limit(struct regulator_dev *dev, 100static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
107 int min_uA, int max_uA) 101 int min_uA, int max_uA)
108{ 102{
109 return gpio_regulator_set_value(dev, min_uA, max_uA, NULL); 103 struct gpio_regulator_data *data = rdev_get_drvdata(dev);
104 int ptr, target = 0, state, best_val = 0;
105
106 for (ptr = 0; ptr < data->nr_states; ptr++)
107 if (data->states[ptr].value > best_val &&
108 data->states[ptr].value >= min_uA &&
109 data->states[ptr].value <= max_uA) {
110 target = data->states[ptr].gpios;
111 best_val = data->states[ptr].value;
112 }
113
114 if (best_val == 0)
115 return -EINVAL;
116
117 for (ptr = 0; ptr < data->nr_gpios; ptr++) {
118 state = (target & (1 << ptr)) >> ptr;
119 gpio_set_value(data->gpios[ptr].gpio, state);
120 }
121 data->state = target;
122
123 return 0;
110} 124}
111 125
112static struct regulator_ops gpio_regulator_voltage_ops = { 126static struct regulator_ops gpio_regulator_voltage_ops = {
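
The open-coded replacement above differs from the voltage path in one respect: for a current limit it keeps the largest table value that still fits in [min_uA, max_uA] (best_val starts at 0 and only grows), while the voltage path keeps the smallest. A standalone sketch of that selection over an invented state table:

#include <stdio.h>

struct state { int value; int gpios; };

/* Pick the highest state value within [min, max]; return the matching
 * GPIO bit pattern, or -1 if nothing fits.  Table values are invented. */
static int pick_current_state(const struct state *s, int n, int min, int max)
{
	int best = 0, target = -1;
	int i;

	for (i = 0; i < n; i++)
		if (s[i].value > best && s[i].value >= min && s[i].value <= max) {
			best = s[i].value;
			target = s[i].gpios;
		}
	return target;
}

int main(void)
{
	const struct state table[] = {
		{ 500000, 0x0 }, { 1000000, 0x1 }, { 1500000, 0x3 },
	};

	printf("%d\n", pick_current_state(table, 3, 600000, 1600000));
	return 0;
}
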
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 17d19fbbc490..46c7e88f8381 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -486,9 +486,12 @@ static int palmas_map_voltage_ldo(struct regulator_dev *rdev,
486{ 486{
487 int ret, voltage; 487 int ret, voltage;
488 488
489 ret = ((min_uV - 900000) / 50000) + 1; 489 if (min_uV == 0)
490 if (ret < 0) 490 return 0;
491 return ret; 491
492 if (min_uV < 900000)
493 min_uV = 900000;
494 ret = DIV_ROUND_UP(min_uV - 900000, 50000) + 1;
492 495
493 /* Map back into a voltage to verify we're still in bounds */ 496 /* Map back into a voltage to verify we're still in bounds */
494 voltage = palmas_list_voltage_ldo(rdev, ret); 497 voltage = palmas_list_voltage_ldo(rdev, ret);
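
The reworked mapping above clamps the request to the 900 mV floor and uses DIV_ROUND_UP so the chosen step is never below min_uV (selector 0 is reserved for "off"). A standalone illustration of the arithmetic, assuming selector n (n >= 1) corresponds to 900000 + (n - 1) * 50000 uV, as the formula implies:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Selector 0 means "off"; selectors 1..N step up in 50 mV increments from
 * 900 mV, mirroring the rounding in the hunk above. */
static int ldo_map_voltage(int min_uV)
{
	if (min_uV == 0)
		return 0;
	if (min_uV < 900000)
		min_uV = 900000;
	return DIV_ROUND_UP(min_uV - 900000, 50000) + 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       ldo_map_voltage(900000),		/* 1 */
	       ldo_map_voltage(925000),		/* rounds up to 950 mV -> 2 */
	       ldo_map_voltage(1800000));	/* 19 */
	return 0;
}
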
@@ -586,7 +589,7 @@ static int palmas_ldo_init(struct palmas *palmas, int id,
586 589
587 addr = palmas_regs_info[id].ctrl_addr; 590 addr = palmas_regs_info[id].ctrl_addr;
588 591
589 ret = palmas_smps_read(palmas, addr, &reg); 592 ret = palmas_ldo_read(palmas, addr, &reg);
590 if (ret) 593 if (ret)
591 return ret; 594 return ret;
592 595
@@ -596,7 +599,7 @@ static int palmas_ldo_init(struct palmas *palmas, int id,
596 if (reg_init->mode_sleep) 599 if (reg_init->mode_sleep)
597 reg |= PALMAS_LDO1_CTRL_MODE_SLEEP; 600 reg |= PALMAS_LDO1_CTRL_MODE_SLEEP;
598 601
599 ret = palmas_smps_write(palmas, addr, reg); 602 ret = palmas_ldo_write(palmas, addr, reg);
600 if (ret) 603 if (ret)
601 return ret; 604 return ret;
602 605
@@ -630,7 +633,7 @@ static __devinit int palmas_probe(struct platform_device *pdev)
630 633
631 ret = palmas_smps_read(palmas, PALMAS_SMPS_CTRL, &reg); 634 ret = palmas_smps_read(palmas, PALMAS_SMPS_CTRL, &reg);
632 if (ret) 635 if (ret)
633 goto err_unregister_regulator; 636 return ret;
634 637
635 if (reg & PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN) 638 if (reg & PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN)
636 pmic->smps123 = 1; 639 pmic->smps123 = 1;
@@ -676,7 +679,9 @@ static __devinit int palmas_probe(struct platform_device *pdev)
676 case PALMAS_REG_SMPS10: 679 case PALMAS_REG_SMPS10:
677 pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES; 680 pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES;
678 pmic->desc[id].ops = &palmas_ops_smps10; 681 pmic->desc[id].ops = &palmas_ops_smps10;
679 pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL; 682 pmic->desc[id].vsel_reg =
683 PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
684 PALMAS_SMPS10_CTRL);
680 pmic->desc[id].vsel_mask = SMPS10_VSEL; 685 pmic->desc[id].vsel_mask = SMPS10_VSEL;
681 pmic->desc[id].enable_reg = 686 pmic->desc[id].enable_reg =
682 PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, 687 PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
@@ -778,8 +783,10 @@ static __devinit int palmas_probe(struct platform_device *pdev)
778 reg_init = pdata->reg_init[id]; 783 reg_init = pdata->reg_init[id];
779 if (reg_init) { 784 if (reg_init) {
780 ret = palmas_ldo_init(palmas, id, reg_init); 785 ret = palmas_ldo_init(palmas, id, reg_init);
781 if (ret) 786 if (ret) {
787 regulator_unregister(pmic->rdev[id]);
782 goto err_unregister_regulator; 788 goto err_unregister_regulator;
789 }
783 } 790 }
784 } 791 }
785 } 792 }
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index e6da90ab5153..19241fc30050 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -240,14 +240,16 @@ static struct tps6586x_regulator tps6586x_regulator[] = {
240 TPS6586X_LDO(LDO_9, "vinldo9", ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7), 240 TPS6586X_LDO(LDO_9, "vinldo9", ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),
241 TPS6586X_LDO(LDO_RTC, NULL, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7), 241 TPS6586X_LDO(LDO_RTC, NULL, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7),
242 TPS6586X_LDO(LDO_1, "vinldo01", dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1), 242 TPS6586X_LDO(LDO_1, "vinldo01", dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1),
243 TPS6586X_LDO(SM_2, "sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), 243 TPS6586X_LDO(SM_2, "vin-sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
244 244
245 TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3, 245 TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3,
246 ENB, 3, VCC2, 6), 246 ENB, 3, VCC2, 6),
247 TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3, 247 TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3,
248 END, 3, VCC1, 6), 248 END, 3, VCC1, 6),
249 TPS6586X_DVM(SM_0, "sm0", dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2), 249 TPS6586X_DVM(SM_0, "vin-sm0", dvm, SM0V1, 0, 5, ENA, 1,
250 TPS6586X_DVM(SM_1, "sm1", dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0), 250 ENB, 1, VCC1, 2),
251 TPS6586X_DVM(SM_1, "vin-sm1", dvm, SM1V1, 0, 5, ENA, 0,
252 ENB, 0, VCC1, 0),
251}; 253};
252 254
253/* 255/*
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 242fe90dc565..77a71a5c17c3 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -1037,7 +1037,7 @@ TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300);
1037TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300); 1037TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300);
1038TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300); 1038TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300);
1039TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300); 1039TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300);
1040TWL4030_FIXED_LDO(VINTANA2, 0x3f, 1500, 11, 100, 0x08); 1040TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);
1041TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08); 1041TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);
1042TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08); 1042TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08);
1043TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08); 1043TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08);
@@ -1048,7 +1048,6 @@ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0);
1048TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0); 1048TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0);
1049TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0); 1049TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0);
1050TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0); 1050TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0);
1051TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0);
1052TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34); 1051TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34);
1053TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10); 1052TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10);
1054TWL6025_ADJUSTABLE_SMPS(VIO, 0x16); 1053TWL6025_ADJUSTABLE_SMPS(VIO, 0x16);
@@ -1117,7 +1116,7 @@ static const struct of_device_id twl_of_match[] __devinitconst = {
1117 TWL6025_OF_MATCH("ti,twl6025-ldo6", LDO6), 1116 TWL6025_OF_MATCH("ti,twl6025-ldo6", LDO6),
1118 TWL6025_OF_MATCH("ti,twl6025-ldoln", LDOLN), 1117 TWL6025_OF_MATCH("ti,twl6025-ldoln", LDOLN),
1119 TWL6025_OF_MATCH("ti,twl6025-ldousb", LDOUSB), 1118 TWL6025_OF_MATCH("ti,twl6025-ldousb", LDOUSB),
1120 TWLFIXED_OF_MATCH("ti,twl4030-vintana2", VINTANA2), 1119 TWLFIXED_OF_MATCH("ti,twl4030-vintana1", VINTANA1),
1121 TWLFIXED_OF_MATCH("ti,twl4030-vintdig", VINTDIG), 1120 TWLFIXED_OF_MATCH("ti,twl4030-vintdig", VINTDIG),
1122 TWLFIXED_OF_MATCH("ti,twl4030-vusb1v5", VUSB1V5), 1121 TWLFIXED_OF_MATCH("ti,twl4030-vusb1v5", VUSB1V5),
1123 TWLFIXED_OF_MATCH("ti,twl4030-vusb1v8", VUSB1V8), 1122 TWLFIXED_OF_MATCH("ti,twl4030-vusb1v8", VUSB1V8),
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 836118795c0b..13e4df63974f 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -43,6 +43,7 @@
43#include <linux/rtc.h> 43#include <linux/rtc.h>
44#include <linux/spi/spi.h> 44#include <linux/spi/spi.h>
45#include <linux/module.h> 45#include <linux/module.h>
46#include <linux/sysfs.h>
46 47
47#define DRV_VERSION "0.6" 48#define DRV_VERSION "0.6"
48 49
@@ -292,6 +293,7 @@ static int __devinit pcf2123_probe(struct spi_device *spi)
292 pdata->rtc = rtc; 293 pdata->rtc = rtc;
293 294
294 for (i = 0; i < 16; i++) { 295 for (i = 0; i < 16; i++) {
296 sysfs_attr_init(&pdata->regs[i].attr.attr);
295 sprintf(pdata->regs[i].name, "%1x", i); 297 sprintf(pdata->regs[i].name, "%1x", i);
296 pdata->regs[i].attr.attr.mode = S_IRUGO | S_IWUSR; 298 pdata->regs[i].attr.attr.mode = S_IRUGO | S_IWUSR;
297 pdata->regs[i].attr.attr.name = pdata->regs[i].name; 299 pdata->regs[i].attr.attr.name = pdata->regs[i].name;
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 77074ccd2850..fd5c7af04ae5 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -122,9 +122,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)
122 tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK); 122 tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
123 tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK); 123 tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
124 if (!pdata->rtc_24h) { 124 if (!pdata->rtc_24h) {
125 tm->tm_hour %= 12; 125 if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) {
126 if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) 126 tm->tm_hour -= 20;
127 tm->tm_hour %= 12;
127 tm->tm_hour += 12; 128 tm->tm_hour += 12;
129 } else
130 tm->tm_hour %= 12;
128 } 131 }
129 tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK); 132 tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
130 tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK); 133 tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
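
In 12-hour mode the PM flag sits inside the hours register, so bcd2bin() on the unmasked value comes back 20 too high; the fix subtracts that before folding the hour into 24-hour form. The underlying 12-hour to 24-hour rule, as a standalone sketch with the register details stripped away:

#include <stdio.h>

/* Convert a 12-hour clock reading (1..12 plus an AM/PM flag) to 0..23.
 * 12 AM maps to 0 and 12 PM maps to 12, which is what the "% 12" in the
 * driver fix achieves. */
static int to_24h(int hour12, int pm)
{
	int hour = hour12 % 12;

	return pm ? hour + 12 : hour;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       to_24h(12, 0),	/* midnight -> 0 */
	       to_24h(1, 0),	/* 1 AM -> 1 */
	       to_24h(12, 1),	/* noon -> 12 */
	       to_24h(11, 1));	/* 11 PM -> 23 */
	return 0;
}
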
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 40a826a7295f..2fb2b9ea97ec 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3804,7 +3804,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
3804 case BIODASDSYMMIO: 3804 case BIODASDSYMMIO:
3805 return dasd_symm_io(device, argp); 3805 return dasd_symm_io(device, argp);
3806 default: 3806 default:
3807 return -ENOIOCTLCMD; 3807 return -ENOTTY;
3808 } 3808 }
3809} 3809}
3810 3810
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index cceae70279f6..654c6921a6d4 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -498,12 +498,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
498 break; 498 break;
499 default: 499 default:
500 /* if the discipline has an ioctl method try it. */ 500 /* if the discipline has an ioctl method try it. */
501 if (base->discipline->ioctl) { 501 rc = -ENOTTY;
502 if (base->discipline->ioctl)
502 rc = base->discipline->ioctl(block, cmd, argp); 503 rc = base->discipline->ioctl(block, cmd, argp);
503 if (rc == -ENOIOCTLCMD)
504 rc = -EINVAL;
505 } else
506 rc = -EINVAL;
507 } 504 }
508 dasd_put_device(base); 505 dasd_put_device(base);
509 return rc; 506 return rc;
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 6e25ef1bce91..a9f4049c6769 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -47,6 +47,8 @@ struct bcm63xx_spi {
47 /* Platform data */ 47 /* Platform data */
48 u32 speed_hz; 48 u32 speed_hz;
49 unsigned fifo_size; 49 unsigned fifo_size;
50 unsigned int msg_type_shift;
51 unsigned int msg_ctl_width;
50 52
51 /* Data buffers */ 53 /* Data buffers */
52 const unsigned char *tx_ptr; 54 const unsigned char *tx_ptr;
@@ -221,13 +223,20 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
221 msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT); 223 msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);
222 224
223 if (t->rx_buf && t->tx_buf) 225 if (t->rx_buf && t->tx_buf)
224 msg_ctl |= (SPI_FD_RW << SPI_MSG_TYPE_SHIFT); 226 msg_ctl |= (SPI_FD_RW << bs->msg_type_shift);
225 else if (t->rx_buf) 227 else if (t->rx_buf)
226 msg_ctl |= (SPI_HD_R << SPI_MSG_TYPE_SHIFT); 228 msg_ctl |= (SPI_HD_R << bs->msg_type_shift);
227 else if (t->tx_buf) 229 else if (t->tx_buf)
228 msg_ctl |= (SPI_HD_W << SPI_MSG_TYPE_SHIFT); 230 msg_ctl |= (SPI_HD_W << bs->msg_type_shift);
229 231
230 bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL); 232 switch (bs->msg_ctl_width) {
233 case 8:
234 bcm_spi_writeb(bs, msg_ctl, SPI_MSG_CTL);
235 break;
236 case 16:
237 bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL);
238 break;
239 }
231 240
232 /* Issue the transfer */ 241 /* Issue the transfer */
233 cmd = SPI_CMD_START_IMMEDIATE; 242 cmd = SPI_CMD_START_IMMEDIATE;
@@ -406,9 +415,21 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
406 master->transfer_one_message = bcm63xx_spi_transfer_one; 415 master->transfer_one_message = bcm63xx_spi_transfer_one;
407 master->mode_bits = MODEBITS; 416 master->mode_bits = MODEBITS;
408 bs->speed_hz = pdata->speed_hz; 417 bs->speed_hz = pdata->speed_hz;
418 bs->msg_type_shift = pdata->msg_type_shift;
419 bs->msg_ctl_width = pdata->msg_ctl_width;
409 bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA)); 420 bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
410 bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA)); 421 bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA));
411 422
423 switch (bs->msg_ctl_width) {
424 case 8:
425 case 16:
426 break;
427 default:
428 dev_err(dev, "unsupported MSG_CTL width: %d\n",
429 bs->msg_ctl_width);
430 goto out_clk_disable;
431 }
432
412 /* Initialize hardware */ 433 /* Initialize hardware */
413 clk_enable(bs->clk); 434 clk_enable(bs->clk);
414 bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); 435 bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
@@ -438,7 +459,7 @@ out:
438 459
439static int __devexit bcm63xx_spi_remove(struct platform_device *pdev) 460static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)
440{ 461{
441 struct spi_master *master = platform_get_drvdata(pdev); 462 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
442 struct bcm63xx_spi *bs = spi_master_get_devdata(master); 463 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
443 464
444 spi_unregister_master(master); 465 spi_unregister_master(master);
@@ -452,6 +473,8 @@ static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)
452 473
453 platform_set_drvdata(pdev, 0); 474 platform_set_drvdata(pdev, 0);
454 475
476 spi_master_put(master);
477
455 return 0; 478 return 0;
456} 479}
457 480
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index b2d4b9e4e010..764bfee75920 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -533,7 +533,6 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev)
533 iounmap(mcfqspi->iobase); 533 iounmap(mcfqspi->iobase);
534 release_mem_region(res->start, resource_size(res)); 534 release_mem_region(res->start, resource_size(res));
535 spi_unregister_master(master); 535 spi_unregister_master(master);
536 spi_master_put(master);
537 536
538 return 0; 537 return 0;
539} 538}
@@ -541,7 +540,7 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev)
541#ifdef CONFIG_PM_SLEEP 540#ifdef CONFIG_PM_SLEEP
542static int mcfqspi_suspend(struct device *dev) 541static int mcfqspi_suspend(struct device *dev)
543{ 542{
544 struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); 543 struct spi_master *master = dev_get_drvdata(dev);
545 struct mcfqspi *mcfqspi = spi_master_get_devdata(master); 544 struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
546 545
547 spi_master_suspend(master); 546 spi_master_suspend(master);
@@ -553,7 +552,7 @@ static int mcfqspi_suspend(struct device *dev)
553 552
554static int mcfqspi_resume(struct device *dev) 553static int mcfqspi_resume(struct device *dev)
555{ 554{
556 struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); 555 struct spi_master *master = dev_get_drvdata(dev);
557 struct mcfqspi *mcfqspi = spi_master_get_devdata(master); 556 struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
558 557
559 spi_master_resume(master); 558 spi_master_resume(master);
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index bc4778175e34..b2fb141da375 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1228,18 +1228,16 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
1228 1228
1229 status = spi_register_master(master); 1229 status = spi_register_master(master);
1230 if (status < 0) 1230 if (status < 0)
1231 goto err_spi_register; 1231 goto disable_pm;
1232 1232
1233 return status; 1233 return status;
1234 1234
1235err_spi_register:
1236 spi_master_put(master);
1237disable_pm: 1235disable_pm:
1238 pm_runtime_disable(&pdev->dev); 1236 pm_runtime_disable(&pdev->dev);
1239dma_chnl_free: 1237dma_chnl_free:
1240 kfree(mcspi->dma_channels); 1238 kfree(mcspi->dma_channels);
1241free_master: 1239free_master:
1242 kfree(master); 1240 spi_master_put(master);
1243 platform_set_drvdata(pdev, NULL); 1241 platform_set_drvdata(pdev, NULL);
1244 return status; 1242 return status;
1245} 1243}
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index aab518ec2bbc..6abbe23c39b4 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2053,7 +2053,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2053 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", 2053 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
2054 adev->res.start, pl022->virtbase); 2054 adev->res.start, pl022->virtbase);
2055 2055
2056 pm_runtime_enable(dev);
2057 pm_runtime_resume(dev); 2056 pm_runtime_resume(dev);
2058 2057
2059 pl022->clk = clk_get(&adev->dev, NULL); 2058 pl022->clk = clk_get(&adev->dev, NULL);
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index cfa2c35dfeed..d1c8441f638c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1479,40 +1479,40 @@ static const struct dev_pm_ops s3c64xx_spi_pm = {
1479 s3c64xx_spi_runtime_resume, NULL) 1479 s3c64xx_spi_runtime_resume, NULL)
1480}; 1480};
1481 1481
1482struct s3c64xx_spi_port_config s3c2443_spi_port_config = { 1482static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
1483 .fifo_lvl_mask = { 0x7f }, 1483 .fifo_lvl_mask = { 0x7f },
1484 .rx_lvl_offset = 13, 1484 .rx_lvl_offset = 13,
1485 .tx_st_done = 21, 1485 .tx_st_done = 21,
1486 .high_speed = true, 1486 .high_speed = true,
1487}; 1487};
1488 1488
1489struct s3c64xx_spi_port_config s3c6410_spi_port_config = { 1489static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
1490 .fifo_lvl_mask = { 0x7f, 0x7F }, 1490 .fifo_lvl_mask = { 0x7f, 0x7F },
1491 .rx_lvl_offset = 13, 1491 .rx_lvl_offset = 13,
1492 .tx_st_done = 21, 1492 .tx_st_done = 21,
1493}; 1493};
1494 1494
1495struct s3c64xx_spi_port_config s5p64x0_spi_port_config = { 1495static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
1496 .fifo_lvl_mask = { 0x1ff, 0x7F }, 1496 .fifo_lvl_mask = { 0x1ff, 0x7F },
1497 .rx_lvl_offset = 15, 1497 .rx_lvl_offset = 15,
1498 .tx_st_done = 25, 1498 .tx_st_done = 25,
1499}; 1499};
1500 1500
1501struct s3c64xx_spi_port_config s5pc100_spi_port_config = { 1501static struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
1502 .fifo_lvl_mask = { 0x7f, 0x7F }, 1502 .fifo_lvl_mask = { 0x7f, 0x7F },
1503 .rx_lvl_offset = 13, 1503 .rx_lvl_offset = 13,
1504 .tx_st_done = 21, 1504 .tx_st_done = 21,
1505 .high_speed = true, 1505 .high_speed = true,
1506}; 1506};
1507 1507
1508struct s3c64xx_spi_port_config s5pv210_spi_port_config = { 1508static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
1509 .fifo_lvl_mask = { 0x1ff, 0x7F }, 1509 .fifo_lvl_mask = { 0x1ff, 0x7F },
1510 .rx_lvl_offset = 15, 1510 .rx_lvl_offset = 15,
1511 .tx_st_done = 25, 1511 .tx_st_done = 25,
1512 .high_speed = true, 1512 .high_speed = true,
1513}; 1513};
1514 1514
1515struct s3c64xx_spi_port_config exynos4_spi_port_config = { 1515static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
1516 .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F }, 1516 .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F },
1517 .rx_lvl_offset = 15, 1517 .rx_lvl_offset = 15,
1518 .tx_st_done = 25, 1518 .tx_st_done = 25,
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index b06fd5b723fa..d536756549e6 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -189,7 +189,7 @@ DEVICE_PARAM(b80211hEnable, "802.11h mode");
189// Static vars definitions 189// Static vars definitions
190// 190//
191 191
192static struct usb_device_id vt6656_table[] __devinitdata = { 192static struct usb_device_id vt6656_table[] = {
193 {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)}, 193 {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
194 {} 194 {}
195}; 195};
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index ef360547ecec..0ca857ac473e 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -25,7 +25,7 @@ MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver");
25MODULE_LICENSE("GPL"); 25MODULE_LICENSE("GPL");
26MODULE_VERSION("0.1"); 26MODULE_VERSION("0.1");
27 27
28static const struct usb_device_id wb35_table[] __devinitconst = { 28static const struct usb_device_id wb35_table[] = {
29 { USB_DEVICE(0x0416, 0x0035) }, 29 { USB_DEVICE(0x0416, 0x0035) },
30 { USB_DEVICE(0x18E8, 0x6201) }, 30 { USB_DEVICE(0x18E8, 0x6201) },
31 { USB_DEVICE(0x18E8, 0x6206) }, 31 { USB_DEVICE(0x18E8, 0x6206) },
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 6e32ff6f2fa0..5552fa7426bc 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -673,8 +673,15 @@ static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
673 struct scsi_device *sd = pdv->pdv_sd; 673 struct scsi_device *sd = pdv->pdv_sd;
674 int result; 674 int result;
675 struct pscsi_plugin_task *pt = cmd->priv; 675 struct pscsi_plugin_task *pt = cmd->priv;
676 unsigned char *cdb = &pt->pscsi_cdb[0]; 676 unsigned char *cdb;
677 /*
678 * Special case for REPORT_LUNs handling where pscsi_plugin_task has
679 * not been allocated because TCM is handling the emulation directly.
680 */
681 if (!pt)
682 return 0;
677 683
684 cdb = &pt->pscsi_cdb[0];
678 result = pt->pscsi_result; 685 result = pt->pscsi_result;
679 /* 686 /*
680 * Hack to make sure that Write-Protect modepage is set if R/O mode is 687 * Hack to make sure that Write-Protect modepage is set if R/O mode is
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0eaae23d12b5..4de3186dc44e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1165,8 +1165,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1165 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1165 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
1166 cmd->data_length, size, cmd->t_task_cdb[0]); 1166 cmd->data_length, size, cmd->t_task_cdb[0]);
1167 1167
1168 cmd->cmd_spdtl = size;
1169
1170 if (cmd->data_direction == DMA_TO_DEVICE) { 1168 if (cmd->data_direction == DMA_TO_DEVICE) {
1171 pr_err("Rejecting underflow/overflow" 1169 pr_err("Rejecting underflow/overflow"
1172 " WRITE data\n"); 1170 " WRITE data\n");
@@ -2294,9 +2292,9 @@ transport_generic_get_mem(struct se_cmd *cmd)
2294 return 0; 2292 return 0;
2295 2293
2296out: 2294out:
2297 while (i >= 0) { 2295 while (i > 0) {
2298 __free_page(sg_page(&cmd->t_data_sg[i]));
2299 i--; 2296 i--;
2297 __free_page(sg_page(&cmd->t_data_sg[i]));
2300 } 2298 }
2301 kfree(cmd->t_data_sg); 2299 kfree(cmd->t_data_sg);
2302 cmd->t_data_sg = NULL; 2300 cmd->t_data_sg = NULL;
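
The loop change above is the standard partial-cleanup pattern: when allocation fails at index i, only entries 0..i-1 exist, so the old while (i >= 0) walked one slot too far and freed the very index that had just failed. A standalone sketch of the corrected pattern, with plain malloc standing in for the page allocations:

#include <stdlib.h>

/* Allocate n buffers; on failure free only those already allocated,
 * mirroring the corrected "while (i > 0) { i--; free(...); }" loop. */
static int alloc_all(void **bufs, int n, size_t sz)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(sz);
		if (!bufs[i])
			goto out;
	}
	return 0;
out:
	while (i > 0) {
		i--;
		free(bufs[i]);
	}
	return -1;
}

int main(void)
{
	void *bufs[4];
	int i;

	if (alloc_all(bufs, 4, 64))
		return 1;
	for (i = 0; i < 4; i++)
		free(bufs[i]);
	return 0;
}
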
@@ -2323,9 +2321,12 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
2323 if (ret < 0) 2321 if (ret < 0)
2324 goto out_fail; 2322 goto out_fail;
2325 } 2323 }
2326 2324 /*
2327 /* Workaround for handling zero-length control CDBs */ 2325 * If this command doesn't have any payload and we don't have to call
2328 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) { 2326 * into the fabric for data transfers, go ahead and complete it right
2327 * away.
2328 */
2329 if (!cmd->data_length) {
2329 spin_lock_irq(&cmd->t_state_lock); 2330 spin_lock_irq(&cmd->t_state_lock);
2330 cmd->t_state = TRANSPORT_COMPLETE; 2331 cmd->t_state = TRANSPORT_COMPLETE;
2331 cmd->transport_state |= CMD_T_ACTIVE; 2332 cmd->transport_state |= CMD_T_ACTIVE;
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index c5eb3c33c3db..eea69358ced3 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -131,6 +131,7 @@ extern struct list_head ft_lport_list;
131extern struct mutex ft_lport_lock; 131extern struct mutex ft_lport_lock;
132extern struct fc4_prov ft_prov; 132extern struct fc4_prov ft_prov;
133extern struct target_fabric_configfs *ft_configfs; 133extern struct target_fabric_configfs *ft_configfs;
134extern unsigned int ft_debug_logging;
134 135
135/* 136/*
136 * Fabric methods. 137 * Fabric methods.
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index b9cb5006177e..823e6922249d 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -48,7 +48,7 @@
48/* 48/*
49 * Dump cmd state for debugging. 49 * Dump cmd state for debugging.
50 */ 50 */
51void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) 51static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
52{ 52{
53 struct fc_exch *ep; 53 struct fc_exch *ep;
54 struct fc_seq *sp; 54 struct fc_seq *sp;
@@ -80,6 +80,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
80 } 80 }
81} 81}
82 82
83void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
84{
85 if (unlikely(ft_debug_logging))
86 _ft_dump_cmd(cmd, caller);
87}
88
83static void ft_free_cmd(struct ft_cmd *cmd) 89static void ft_free_cmd(struct ft_cmd *cmd)
84{ 90{
85 struct fc_frame *fp; 91 struct fc_frame *fp;
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 87901fa74dd7..3c9e5b57caab 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -456,7 +456,9 @@ static void ft_prlo(struct fc_rport_priv *rdata)
456 struct ft_tport *tport; 456 struct ft_tport *tport;
457 457
458 mutex_lock(&ft_lport_lock); 458 mutex_lock(&ft_lport_lock);
459 tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]); 459 tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP],
460 lockdep_is_held(&ft_lport_lock));
461
460 if (!tport) { 462 if (!tport) {
461 mutex_unlock(&ft_lport_lock); 463 mutex_unlock(&ft_lport_lock);
462 return; 464 return;
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index a7773a3e02b1..7065df6036ca 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -13,7 +13,7 @@ config USB_ARCH_HAS_OHCI
13 default y if PXA3xx 13 default y if PXA3xx
14 default y if ARCH_EP93XX 14 default y if ARCH_EP93XX
15 default y if ARCH_AT91 15 default y if ARCH_AT91
16 default y if ARCH_PNX4008 && I2C 16 default y if ARCH_PNX4008
17 default y if MFD_TC6393XB 17 default y if MFD_TC6393XB
18 default y if ARCH_W90X900 18 default y if ARCH_W90X900
19 default y if ARCH_DAVINCI_DA8XX 19 default y if ARCH_DAVINCI_DA8XX
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 56d6bf668488..f763ed7ba91e 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1104,7 +1104,8 @@ skip_normal_probe:
1104 } 1104 }
1105 1105
1106 1106
1107 if (data_interface->cur_altsetting->desc.bNumEndpoints < 2) 1107 if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
1108 control_interface->cur_altsetting->desc.bNumEndpoints == 0)
1108 return -EINVAL; 1109 return -EINVAL;
1109 1110
1110 epctrl = &control_interface->cur_altsetting->endpoint[0].desc; 1111 epctrl = &control_interface->cur_altsetting->endpoint[0].desc;
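The cdc-acm change above refuses to bind when the control interface reports zero endpoints, since the very next statement dereferences endpoint[0]. A tiny sketch of the same guard in plain C; the struct and function names are made up for illustration:

#include <errno.h>
#include <stdio.h>

struct altsetting {
        int num_endpoints;
};

/* Reject the device before anything dereferences endpoint[0] of an
 * interface that reports no endpoints at all. */
static int check_interfaces(const struct altsetting *data_if,
                            const struct altsetting *ctrl_if)
{
        if (data_if->num_endpoints < 2 || ctrl_if->num_endpoints == 0)
                return -EINVAL;
        return 0;
}

int main(void)
{
        struct altsetting data = { .num_endpoints = 2 };
        struct altsetting ctrl = { .num_endpoints = 0 };

        if (check_interfaces(&data, &ctrl) < 0)
                puts("probe rejected: control interface has no endpoints");
        return 0;
}

A device (or a fuzzer) may present descriptors with any endpoint count, so the driver has to validate the counts before indexing.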
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index ff08015b230c..ae794b90766b 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -232,7 +232,7 @@ wraperr:
232 return err; 232 return err;
233} 233}
234 234
235static const struct usb_device_id id_table[] __devinitconst = { 235static const struct usb_device_id id_table[] = {
236 { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) }, 236 { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
237 { } /* Terminating entry */ 237 { } /* Terminating entry */
238}; 238};
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 9591e2b509d7..17830c9c7cc6 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -264,6 +264,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
264 return group; 264 return group;
265} 265}
266 266
267/* called with vfio.group_lock held */
267static void vfio_group_release(struct kref *kref) 268static void vfio_group_release(struct kref *kref)
268{ 269{
269 struct vfio_group *group = container_of(kref, struct vfio_group, kref); 270 struct vfio_group *group = container_of(kref, struct vfio_group, kref);
@@ -287,13 +288,7 @@ static void vfio_group_release(struct kref *kref)
287 288
288static void vfio_group_put(struct vfio_group *group) 289static void vfio_group_put(struct vfio_group *group)
289{ 290{
290 mutex_lock(&vfio.group_lock); 291 kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
291 /*
292 * Release needs to unlock to unregister the notifier, so only
293 * unlock if not released.
294 */
295 if (!kref_put(&group->kref, vfio_group_release))
296 mutex_unlock(&vfio.group_lock);
297} 292}
298 293
299/* Assume group_lock or group reference is held */ 294/* Assume group_lock or group reference is held */
@@ -401,7 +396,6 @@ static void vfio_device_release(struct kref *kref)
401 struct vfio_device, kref); 396 struct vfio_device, kref);
402 struct vfio_group *group = device->group; 397 struct vfio_group *group = device->group;
403 398
404 mutex_lock(&group->device_lock);
405 list_del(&device->group_next); 399 list_del(&device->group_next);
406 mutex_unlock(&group->device_lock); 400 mutex_unlock(&group->device_lock);
407 401
@@ -416,8 +410,9 @@ static void vfio_device_release(struct kref *kref)
416/* Device reference always implies a group reference */ 410/* Device reference always implies a group reference */
417static void vfio_device_put(struct vfio_device *device) 411static void vfio_device_put(struct vfio_device *device)
418{ 412{
419 kref_put(&device->kref, vfio_device_release); 413 struct vfio_group *group = device->group;
420 vfio_group_put(device->group); 414 kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
415 vfio_group_put(group);
421} 416}
422 417
423static void vfio_device_get(struct vfio_device *device) 418static void vfio_device_get(struct vfio_device *device)
@@ -1116,10 +1111,10 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1116 */ 1111 */
1117 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); 1112 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
1118 1113
1119 fd_install(ret, filep);
1120
1121 vfio_device_get(device); 1114 vfio_device_get(device);
1122 atomic_inc(&group->container_users); 1115 atomic_inc(&group->container_users);
1116
1117 fd_install(ret, filep);
1123 break; 1118 break;
1124 } 1119 }
1125 mutex_unlock(&group->device_lock); 1120 mutex_unlock(&group->device_lock);
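The vfio_group_put()/vfio_device_put() hunks above switch from "take the mutex, then kref_put()" to kref_put_mutex(), which only acquires the mutex when the reference count is about to reach zero and invokes the release callback with that mutex held (hence the "called with vfio.group_lock held" comment). A rough userspace analogue of the same pattern using C11 atomics and a pthread mutex; obj, table_lock and obj_release are invented names, not from the driver:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
        atomic_int refs;
        /* ... payload ... */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Drop a reference.  Only the thread that drops the last reference takes
 * table_lock, and release() runs with the lock held, so a concurrent
 * lookup (which also runs under table_lock) can never hand out a new
 * reference to an object that is mid-teardown. */
static bool obj_put(struct obj *o, void (*release)(struct obj *))
{
        int old = atomic_load(&o->refs);

        /* fast path: drop the reference locklessly while we are provably
         * not the last holder */
        while (old > 1)
                if (atomic_compare_exchange_weak(&o->refs, &old, old - 1))
                        return false;

        /* possibly the last reference: serialize against lookups first,
         * then re-check */
        pthread_mutex_lock(&table_lock);
        if (atomic_fetch_sub(&o->refs, 1) != 1) {
                pthread_mutex_unlock(&table_lock);
                return false;
        }
        release(o);                       /* called with table_lock held */
        pthread_mutex_unlock(&table_lock);
        return true;
}

static void obj_release(struct obj *o)
{
        /* with table_lock held: remove from the lookup structure, then free */
        free(o);
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        atomic_store(&o->refs, 2);
        obj_put(o, obj_release);          /* not the last reference */
        obj_put(o, obj_release);          /* last reference: frees under lock */
        return 0;
}

The related reorder in vfio_group_get_device_fd() follows the same logic in the other direction: take the references before fd_install() publishes the file to userspace.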
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index fb366540ed54..ed8e2e6c8df2 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -53,9 +53,14 @@
53#include "vhost.h" 53#include "vhost.h"
54#include "tcm_vhost.h" 54#include "tcm_vhost.h"
55 55
56enum {
57 VHOST_SCSI_VQ_CTL = 0,
58 VHOST_SCSI_VQ_EVT = 1,
59 VHOST_SCSI_VQ_IO = 2,
60};
61
56struct vhost_scsi { 62struct vhost_scsi {
57 atomic_t vhost_ref_cnt; 63 struct tcm_vhost_tpg *vs_tpg; /* Protected by vhost_scsi->dev.mutex */
58 struct tcm_vhost_tpg *vs_tpg;
59 struct vhost_dev dev; 64 struct vhost_dev dev;
60 struct vhost_virtqueue vqs[3]; 65 struct vhost_virtqueue vqs[3];
61 66
@@ -131,8 +136,7 @@ static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
131 return 1; 136 return 1;
132} 137}
133 138
134static u32 tcm_vhost_get_pr_transport_id( 139static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
135 struct se_portal_group *se_tpg,
136 struct se_node_acl *se_nacl, 140 struct se_node_acl *se_nacl,
137 struct t10_pr_registration *pr_reg, 141 struct t10_pr_registration *pr_reg,
138 int *format_code, 142 int *format_code,
@@ -162,8 +166,7 @@ static u32 tcm_vhost_get_pr_transport_id(
162 format_code, buf); 166 format_code, buf);
163} 167}
164 168
165static u32 tcm_vhost_get_pr_transport_id_len( 169static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
166 struct se_portal_group *se_tpg,
167 struct se_node_acl *se_nacl, 170 struct se_node_acl *se_nacl,
168 struct t10_pr_registration *pr_reg, 171 struct t10_pr_registration *pr_reg,
169 int *format_code) 172 int *format_code)
@@ -192,8 +195,7 @@ static u32 tcm_vhost_get_pr_transport_id_len(
192 format_code); 195 format_code);
193} 196}
194 197
195static char *tcm_vhost_parse_pr_out_transport_id( 198static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
196 struct se_portal_group *se_tpg,
197 const char *buf, 199 const char *buf,
198 u32 *out_tid_len, 200 u32 *out_tid_len,
199 char **port_nexus_ptr) 201 char **port_nexus_ptr)
@@ -236,8 +238,7 @@ static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
236 return &nacl->se_node_acl; 238 return &nacl->se_node_acl;
237} 239}
238 240
239static void tcm_vhost_release_fabric_acl( 241static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
240 struct se_portal_group *se_tpg,
241 struct se_node_acl *se_nacl) 242 struct se_node_acl *se_nacl)
242{ 243{
243 struct tcm_vhost_nacl *nacl = container_of(se_nacl, 244 struct tcm_vhost_nacl *nacl = container_of(se_nacl,
@@ -297,7 +298,16 @@ static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
297 return 0; 298 return 0;
298} 299}
299 300
300static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *); 301static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
302{
303 struct vhost_scsi *vs = tv_cmd->tvc_vhost;
304
305 spin_lock_bh(&vs->vs_completion_lock);
306 list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
307 spin_unlock_bh(&vs->vs_completion_lock);
308
309 vhost_work_queue(&vs->dev, &vs->vs_completion_work);
310}
301 311
302static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd) 312static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
303{ 313{
@@ -381,7 +391,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
381 vs_completion_work); 391 vs_completion_work);
382 struct tcm_vhost_cmd *tv_cmd; 392 struct tcm_vhost_cmd *tv_cmd;
383 393
384 while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs)) != NULL) { 394 while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) {
385 struct virtio_scsi_cmd_resp v_rsp; 395 struct virtio_scsi_cmd_resp v_rsp;
386 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; 396 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
387 int ret; 397 int ret;
@@ -408,19 +418,6 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
408 vhost_signal(&vs->dev, &vs->vqs[2]); 418 vhost_signal(&vs->dev, &vs->vqs[2]);
409} 419}
410 420
411static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
412{
413 struct vhost_scsi *vs = tv_cmd->tvc_vhost;
414
415 pr_debug("%s tv_cmd %p\n", __func__, tv_cmd);
416
417 spin_lock_bh(&vs->vs_completion_lock);
418 list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
419 spin_unlock_bh(&vs->vs_completion_lock);
420
421 vhost_work_queue(&vs->dev, &vs->vs_completion_work);
422}
423
424static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd( 421static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
425 struct tcm_vhost_tpg *tv_tpg, 422 struct tcm_vhost_tpg *tv_tpg,
426 struct virtio_scsi_cmd_req *v_req, 423 struct virtio_scsi_cmd_req *v_req,
@@ -533,8 +530,8 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
533 sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC); 530 sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
534 if (!sg) 531 if (!sg)
535 return -ENOMEM; 532 return -ENOMEM;
536 pr_debug("%s sg %p sgl_count %u is_err %ld\n", __func__, 533 pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
537 sg, sgl_count, IS_ERR(sg)); 534 sg, sgl_count, !sg);
538 sg_init_table(sg, sgl_count); 535 sg_init_table(sg, sgl_count);
539 536
540 tv_cmd->tvc_sgl = sg; 537 tv_cmd->tvc_sgl = sg;
@@ -787,12 +784,12 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
787 784
788static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) 785static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
789{ 786{
790 pr_err("%s: The handling func for control queue.\n", __func__); 787 pr_debug("%s: The handling func for control queue.\n", __func__);
791} 788}
792 789
793static void vhost_scsi_evt_handle_kick(struct vhost_work *work) 790static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
794{ 791{
795 pr_err("%s: The handling func for event queue.\n", __func__); 792 pr_debug("%s: The handling func for event queue.\n", __func__);
796} 793}
797 794
798static void vhost_scsi_handle_kick(struct vhost_work *work) 795static void vhost_scsi_handle_kick(struct vhost_work *work)
@@ -825,11 +822,6 @@ static int vhost_scsi_set_endpoint(
825 return -EFAULT; 822 return -EFAULT;
826 } 823 }
827 } 824 }
828
829 if (vs->vs_tpg) {
830 mutex_unlock(&vs->dev.mutex);
831 return -EEXIST;
832 }
833 mutex_unlock(&vs->dev.mutex); 825 mutex_unlock(&vs->dev.mutex);
834 826
835 mutex_lock(&tcm_vhost_mutex); 827 mutex_lock(&tcm_vhost_mutex);
@@ -839,7 +831,7 @@ static int vhost_scsi_set_endpoint(
839 mutex_unlock(&tv_tpg->tv_tpg_mutex); 831 mutex_unlock(&tv_tpg->tv_tpg_mutex);
840 continue; 832 continue;
841 } 833 }
842 if (atomic_read(&tv_tpg->tv_tpg_vhost_count)) { 834 if (tv_tpg->tv_tpg_vhost_count != 0) {
843 mutex_unlock(&tv_tpg->tv_tpg_mutex); 835 mutex_unlock(&tv_tpg->tv_tpg_mutex);
844 continue; 836 continue;
845 } 837 }
@@ -847,14 +839,20 @@ static int vhost_scsi_set_endpoint(
847 839
848 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) && 840 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
849 (tv_tpg->tport_tpgt == t->vhost_tpgt)) { 841 (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
850 atomic_inc(&tv_tpg->tv_tpg_vhost_count); 842 tv_tpg->tv_tpg_vhost_count++;
851 smp_mb__after_atomic_inc();
852 mutex_unlock(&tv_tpg->tv_tpg_mutex); 843 mutex_unlock(&tv_tpg->tv_tpg_mutex);
853 mutex_unlock(&tcm_vhost_mutex); 844 mutex_unlock(&tcm_vhost_mutex);
854 845
855 mutex_lock(&vs->dev.mutex); 846 mutex_lock(&vs->dev.mutex);
847 if (vs->vs_tpg) {
848 mutex_unlock(&vs->dev.mutex);
849 mutex_lock(&tv_tpg->tv_tpg_mutex);
850 tv_tpg->tv_tpg_vhost_count--;
851 mutex_unlock(&tv_tpg->tv_tpg_mutex);
852 return -EEXIST;
853 }
854
856 vs->vs_tpg = tv_tpg; 855 vs->vs_tpg = tv_tpg;
857 atomic_inc(&vs->vhost_ref_cnt);
858 smp_mb__after_atomic_inc(); 856 smp_mb__after_atomic_inc();
859 mutex_unlock(&vs->dev.mutex); 857 mutex_unlock(&vs->dev.mutex);
860 return 0; 858 return 0;
@@ -871,38 +869,42 @@ static int vhost_scsi_clear_endpoint(
871{ 869{
872 struct tcm_vhost_tport *tv_tport; 870 struct tcm_vhost_tport *tv_tport;
873 struct tcm_vhost_tpg *tv_tpg; 871 struct tcm_vhost_tpg *tv_tpg;
874 int index; 872 int index, ret;
875 873
876 mutex_lock(&vs->dev.mutex); 874 mutex_lock(&vs->dev.mutex);
877 /* Verify that ring has been setup correctly. */ 875 /* Verify that ring has been setup correctly. */
878 for (index = 0; index < vs->dev.nvqs; ++index) { 876 for (index = 0; index < vs->dev.nvqs; ++index) {
879 if (!vhost_vq_access_ok(&vs->vqs[index])) { 877 if (!vhost_vq_access_ok(&vs->vqs[index])) {
880 mutex_unlock(&vs->dev.mutex); 878 ret = -EFAULT;
881 return -EFAULT; 879 goto err;
882 } 880 }
883 } 881 }
884 882
885 if (!vs->vs_tpg) { 883 if (!vs->vs_tpg) {
886 mutex_unlock(&vs->dev.mutex); 884 ret = -ENODEV;
887 return -ENODEV; 885 goto err;
888 } 886 }
889 tv_tpg = vs->vs_tpg; 887 tv_tpg = vs->vs_tpg;
890 tv_tport = tv_tpg->tport; 888 tv_tport = tv_tpg->tport;
891 889
892 if (strcmp(tv_tport->tport_name, t->vhost_wwpn) || 890 if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
893 (tv_tpg->tport_tpgt != t->vhost_tpgt)) { 891 (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
894 mutex_unlock(&vs->dev.mutex);
895 pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu" 892 pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
896 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n", 893 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
897 tv_tport->tport_name, tv_tpg->tport_tpgt, 894 tv_tport->tport_name, tv_tpg->tport_tpgt,
898 t->vhost_wwpn, t->vhost_tpgt); 895 t->vhost_wwpn, t->vhost_tpgt);
899 return -EINVAL; 896 ret = -EINVAL;
897 goto err;
900 } 898 }
901 atomic_dec(&tv_tpg->tv_tpg_vhost_count); 899 tv_tpg->tv_tpg_vhost_count--;
902 vs->vs_tpg = NULL; 900 vs->vs_tpg = NULL;
903 mutex_unlock(&vs->dev.mutex); 901 mutex_unlock(&vs->dev.mutex);
904 902
905 return 0; 903 return 0;
904
905err:
906 mutex_unlock(&vs->dev.mutex);
907 return ret;
906} 908}
907 909
908static int vhost_scsi_open(struct inode *inode, struct file *f) 910static int vhost_scsi_open(struct inode *inode, struct file *f)
@@ -918,9 +920,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
918 INIT_LIST_HEAD(&s->vs_completion_list); 920 INIT_LIST_HEAD(&s->vs_completion_list);
919 spin_lock_init(&s->vs_completion_lock); 921 spin_lock_init(&s->vs_completion_lock);
920 922
921 s->vqs[0].handle_kick = vhost_scsi_ctl_handle_kick; 923 s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
922 s->vqs[1].handle_kick = vhost_scsi_evt_handle_kick; 924 s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
923 s->vqs[2].handle_kick = vhost_scsi_handle_kick; 925 s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;
924 r = vhost_dev_init(&s->dev, s->vqs, 3); 926 r = vhost_dev_init(&s->dev, s->vqs, 3);
925 if (r < 0) { 927 if (r < 0) {
926 kfree(s); 928 kfree(s);
@@ -949,6 +951,18 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
949 return 0; 951 return 0;
950} 952}
951 953
954static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
955{
956 vhost_poll_flush(&vs->dev.vqs[index].poll);
957}
958
959static void vhost_scsi_flush(struct vhost_scsi *vs)
960{
961 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL);
962 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT);
963 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO);
964}
965
952static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) 966static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
953{ 967{
954 if (features & ~VHOST_FEATURES) 968 if (features & ~VHOST_FEATURES)
@@ -961,7 +975,8 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
961 return -EFAULT; 975 return -EFAULT;
962 } 976 }
963 vs->dev.acked_features = features; 977 vs->dev.acked_features = features;
964 /* TODO possibly smp_wmb() and flush vqs */ 978 smp_wmb();
979 vhost_scsi_flush(vs);
965 mutex_unlock(&vs->dev.mutex); 980 mutex_unlock(&vs->dev.mutex);
966 return 0; 981 return 0;
967} 982}
@@ -974,26 +989,25 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
974 void __user *argp = (void __user *)arg; 989 void __user *argp = (void __user *)arg;
975 u64 __user *featurep = argp; 990 u64 __user *featurep = argp;
976 u64 features; 991 u64 features;
977 int r; 992 int r, abi_version = VHOST_SCSI_ABI_VERSION;
978 993
979 switch (ioctl) { 994 switch (ioctl) {
980 case VHOST_SCSI_SET_ENDPOINT: 995 case VHOST_SCSI_SET_ENDPOINT:
981 if (copy_from_user(&backend, argp, sizeof backend)) 996 if (copy_from_user(&backend, argp, sizeof backend))
982 return -EFAULT; 997 return -EFAULT;
998 if (backend.reserved != 0)
999 return -EOPNOTSUPP;
983 1000
984 return vhost_scsi_set_endpoint(vs, &backend); 1001 return vhost_scsi_set_endpoint(vs, &backend);
985 case VHOST_SCSI_CLEAR_ENDPOINT: 1002 case VHOST_SCSI_CLEAR_ENDPOINT:
986 if (copy_from_user(&backend, argp, sizeof backend)) 1003 if (copy_from_user(&backend, argp, sizeof backend))
987 return -EFAULT; 1004 return -EFAULT;
1005 if (backend.reserved != 0)
1006 return -EOPNOTSUPP;
988 1007
989 return vhost_scsi_clear_endpoint(vs, &backend); 1008 return vhost_scsi_clear_endpoint(vs, &backend);
990 case VHOST_SCSI_GET_ABI_VERSION: 1009 case VHOST_SCSI_GET_ABI_VERSION:
991 if (copy_from_user(&backend, argp, sizeof backend)) 1010 if (copy_to_user(argp, &abi_version, sizeof abi_version))
992 return -EFAULT;
993
994 backend.abi_version = VHOST_SCSI_ABI_VERSION;
995
996 if (copy_to_user(argp, &backend, sizeof backend))
997 return -EFAULT; 1011 return -EFAULT;
998 return 0; 1012 return 0;
999 case VHOST_GET_FEATURES: 1013 case VHOST_GET_FEATURES:
@@ -1013,11 +1027,21 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
1013 } 1027 }
1014} 1028}
1015 1029
1030#ifdef CONFIG_COMPAT
1031static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1032 unsigned long arg)
1033{
1034 return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1035}
1036#endif
1037
1016static const struct file_operations vhost_scsi_fops = { 1038static const struct file_operations vhost_scsi_fops = {
1017 .owner = THIS_MODULE, 1039 .owner = THIS_MODULE,
1018 .release = vhost_scsi_release, 1040 .release = vhost_scsi_release,
1019 .unlocked_ioctl = vhost_scsi_ioctl, 1041 .unlocked_ioctl = vhost_scsi_ioctl,
1020 /* TODO compat ioctl? */ 1042#ifdef CONFIG_COMPAT
1043 .compat_ioctl = vhost_scsi_compat_ioctl,
1044#endif
1021 .open = vhost_scsi_open, 1045 .open = vhost_scsi_open,
1022 .llseek = noop_llseek, 1046 .llseek = noop_llseek,
1023}; 1047};
@@ -1054,28 +1078,28 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1054 return "Unknown"; 1078 return "Unknown";
1055} 1079}
1056 1080
1057static int tcm_vhost_port_link( 1081static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1058 struct se_portal_group *se_tpg,
1059 struct se_lun *lun) 1082 struct se_lun *lun)
1060{ 1083{
1061 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, 1084 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1062 struct tcm_vhost_tpg, se_tpg); 1085 struct tcm_vhost_tpg, se_tpg);
1063 1086
1064 atomic_inc(&tv_tpg->tv_tpg_port_count); 1087 mutex_lock(&tv_tpg->tv_tpg_mutex);
1065 smp_mb__after_atomic_inc(); 1088 tv_tpg->tv_tpg_port_count++;
1089 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1066 1090
1067 return 0; 1091 return 0;
1068} 1092}
1069 1093
1070static void tcm_vhost_port_unlink( 1094static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1071 struct se_portal_group *se_tpg,
1072 struct se_lun *se_lun) 1095 struct se_lun *se_lun)
1073{ 1096{
1074 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, 1097 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1075 struct tcm_vhost_tpg, se_tpg); 1098 struct tcm_vhost_tpg, se_tpg);
1076 1099
1077 atomic_dec(&tv_tpg->tv_tpg_port_count); 1100 mutex_lock(&tv_tpg->tv_tpg_mutex);
1078 smp_mb__after_atomic_dec(); 1101 tv_tpg->tv_tpg_port_count--;
1102 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1079} 1103}
1080 1104
1081static struct se_node_acl *tcm_vhost_make_nodeacl( 1105static struct se_node_acl *tcm_vhost_make_nodeacl(
@@ -1122,8 +1146,7 @@ static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1122 kfree(nacl); 1146 kfree(nacl);
1123} 1147}
1124 1148
1125static int tcm_vhost_make_nexus( 1149static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
1126 struct tcm_vhost_tpg *tv_tpg,
1127 const char *name) 1150 const char *name)
1128{ 1151{
1129 struct se_portal_group *se_tpg; 1152 struct se_portal_group *se_tpg;
@@ -1168,7 +1191,7 @@ static int tcm_vhost_make_nexus(
1168 return -ENOMEM; 1191 return -ENOMEM;
1169 } 1192 }
1170 /* 1193 /*
1171 * Now register the TCM vHost virtual I_T Nexus as active with the 1194 * Now register the TCM vhost virtual I_T Nexus as active with the
1172 * call to __transport_register_session() 1195 * call to __transport_register_session()
1173 */ 1196 */
1174 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, 1197 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
@@ -1179,8 +1202,7 @@ static int tcm_vhost_make_nexus(
1179 return 0; 1202 return 0;
1180} 1203}
1181 1204
1182static int tcm_vhost_drop_nexus( 1205static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1183 struct tcm_vhost_tpg *tpg)
1184{ 1206{
1185 struct se_session *se_sess; 1207 struct se_session *se_sess;
1186 struct tcm_vhost_nexus *tv_nexus; 1208 struct tcm_vhost_nexus *tv_nexus;
@@ -1198,27 +1220,27 @@ static int tcm_vhost_drop_nexus(
1198 return -ENODEV; 1220 return -ENODEV;
1199 } 1221 }
1200 1222
1201 if (atomic_read(&tpg->tv_tpg_port_count)) { 1223 if (tpg->tv_tpg_port_count != 0) {
1202 mutex_unlock(&tpg->tv_tpg_mutex); 1224 mutex_unlock(&tpg->tv_tpg_mutex);
1203 pr_err("Unable to remove TCM_vHost I_T Nexus with" 1225 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1204 " active TPG port count: %d\n", 1226 " active TPG port count: %d\n",
1205 atomic_read(&tpg->tv_tpg_port_count)); 1227 tpg->tv_tpg_port_count);
1206 return -EPERM; 1228 return -EBUSY;
1207 } 1229 }
1208 1230
1209 if (atomic_read(&tpg->tv_tpg_vhost_count)) { 1231 if (tpg->tv_tpg_vhost_count != 0) {
1210 mutex_unlock(&tpg->tv_tpg_mutex); 1232 mutex_unlock(&tpg->tv_tpg_mutex);
1211 pr_err("Unable to remove TCM_vHost I_T Nexus with" 1233 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1212 " active TPG vhost count: %d\n", 1234 " active TPG vhost count: %d\n",
1213 atomic_read(&tpg->tv_tpg_vhost_count)); 1235 tpg->tv_tpg_vhost_count);
1214 return -EPERM; 1236 return -EBUSY;
1215 } 1237 }
1216 1238
1217 pr_debug("TCM_vHost_ConfigFS: Removing I_T Nexus to emulated" 1239 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1218 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), 1240 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1219 tv_nexus->tvn_se_sess->se_node_acl->initiatorname); 1241 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1220 /* 1242 /*
1221 * Release the SCSI I_T Nexus to the emulated vHost Target Port 1243 * Release the SCSI I_T Nexus to the emulated vhost Target Port
1222 */ 1244 */
1223 transport_deregister_session(tv_nexus->tvn_se_sess); 1245 transport_deregister_session(tv_nexus->tvn_se_sess);
1224 tpg->tpg_nexus = NULL; 1246 tpg->tpg_nexus = NULL;
@@ -1228,8 +1250,7 @@ static int tcm_vhost_drop_nexus(
1228 return 0; 1250 return 0;
1229} 1251}
1230 1252
1231static ssize_t tcm_vhost_tpg_show_nexus( 1253static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1232 struct se_portal_group *se_tpg,
1233 char *page) 1254 char *page)
1234{ 1255{
1235 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, 1256 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
@@ -1250,8 +1271,7 @@ static ssize_t tcm_vhost_tpg_show_nexus(
1250 return ret; 1271 return ret;
1251} 1272}
1252 1273
1253static ssize_t tcm_vhost_tpg_store_nexus( 1274static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1254 struct se_portal_group *se_tpg,
1255 const char *page, 1275 const char *page,
1256 size_t count) 1276 size_t count)
1257{ 1277{
@@ -1336,8 +1356,7 @@ static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1336 NULL, 1356 NULL,
1337}; 1357};
1338 1358
1339static struct se_portal_group *tcm_vhost_make_tpg( 1359static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
1340 struct se_wwn *wwn,
1341 struct config_group *group, 1360 struct config_group *group,
1342 const char *name) 1361 const char *name)
1343{ 1362{
@@ -1385,7 +1404,7 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1385 list_del(&tpg->tv_tpg_list); 1404 list_del(&tpg->tv_tpg_list);
1386 mutex_unlock(&tcm_vhost_mutex); 1405 mutex_unlock(&tcm_vhost_mutex);
1387 /* 1406 /*
1388 * Release the virtual I_T Nexus for this vHost TPG 1407 * Release the virtual I_T Nexus for this vhost TPG
1389 */ 1408 */
1390 tcm_vhost_drop_nexus(tpg); 1409 tcm_vhost_drop_nexus(tpg);
1391 /* 1410 /*
@@ -1395,8 +1414,7 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1395 kfree(tpg); 1414 kfree(tpg);
1396} 1415}
1397 1416
1398static struct se_wwn *tcm_vhost_make_tport( 1417static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
1399 struct target_fabric_configfs *tf,
1400 struct config_group *group, 1418 struct config_group *group,
1401 const char *name) 1419 const char *name)
1402{ 1420{
@@ -1592,7 +1610,10 @@ static void tcm_vhost_deregister_configfs(void)
1592static int __init tcm_vhost_init(void) 1610static int __init tcm_vhost_init(void)
1593{ 1611{
1594 int ret = -ENOMEM; 1612 int ret = -ENOMEM;
1595 1613 /*
1614 * Use our own dedicated workqueue for submitting I/O into
1615 * target core to avoid contention within system_wq.
1616 */
1596 tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0); 1617 tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
1597 if (!tcm_vhost_workqueue) 1618 if (!tcm_vhost_workqueue)
1598 goto out; 1619 goto out;
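Among other cleanups, the tcm_vhost.c patch above replaces the bare virtqueue indices 0/1/2 with the named VHOST_SCSI_VQ_CTL/EVT/IO constants (the vhost_signal() call in the completion path still shows vqs[2] in the surrounding context). A small self-contained sketch of the same "name the slots of a fixed array" idiom, with invented MYDEV_* names:

#include <stdio.h>

enum {
        MYDEV_VQ_CTL = 0,     /* control requests     */
        MYDEV_VQ_EVT = 1,     /* asynchronous events  */
        MYDEV_VQ_IO  = 2,     /* data path            */
        MYDEV_VQ_MAX,
};

struct queue {
        void (*handle_kick)(void);
};

static void ctl_kick(void) { puts("ctl"); }
static void evt_kick(void) { puts("evt"); }
static void io_kick(void)  { puts("io");  }

int main(void)
{
        struct queue vqs[MYDEV_VQ_MAX];

        /* indexing by name instead of 0/1/2 keeps the wiring readable and
         * lets helpers like a flush routine iterate up to MYDEV_VQ_MAX */
        vqs[MYDEV_VQ_CTL].handle_kick = ctl_kick;
        vqs[MYDEV_VQ_EVT].handle_kick = evt_kick;
        vqs[MYDEV_VQ_IO].handle_kick  = io_kick;

        for (int i = 0; i < MYDEV_VQ_MAX; i++)
                vqs[i].handle_kick();
        return 0;
}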
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index c983ed21e413..d9e93557d669 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -47,9 +47,9 @@ struct tcm_vhost_tpg {
47 /* Vhost port target portal group tag for TCM */ 47 /* Vhost port target portal group tag for TCM */
48 u16 tport_tpgt; 48 u16 tport_tpgt;
49	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */ 49	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
50 atomic_t tv_tpg_port_count; 50 int tv_tpg_port_count;
51 /* Used for vhost_scsi device reference to tpg_nexus */ 51 /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
52 atomic_t tv_tpg_vhost_count; 52 int tv_tpg_vhost_count;
53 /* list for tcm_vhost_list */ 53 /* list for tcm_vhost_list */
54 struct list_head tv_tpg_list; 54 struct list_head tv_tpg_list;
55 /* Used to protect access for tpg_nexus */ 55 /* Used to protect access for tpg_nexus */
@@ -91,11 +91,13 @@ struct tcm_vhost_tport {
91 91
92struct vhost_scsi_target { 92struct vhost_scsi_target {
93 int abi_version; 93 int abi_version;
94 unsigned char vhost_wwpn[TRANSPORT_IQN_LEN]; 94 char vhost_wwpn[TRANSPORT_IQN_LEN];
95 unsigned short vhost_tpgt; 95 unsigned short vhost_tpgt;
96 unsigned short reserved;
96}; 97};
97 98
98/* VHOST_SCSI specific defines */ 99/* VHOST_SCSI specific defines */
99#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target) 100#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
100#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) 101#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
101#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target) 102/* Changing this breaks userspace. */
103#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)
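With the header change above, VHOST_SCSI_GET_ABI_VERSION now transfers a plain int instead of a struct vhost_scsi_target, and the ioctl handler copies the version straight back to userspace. A hedged sketch of how a userspace tool might query it; the /dev/vhost-scsi node name is an assumption, and the local 0xAF define simply mirrors VHOST_VIRTIO from <linux/vhost.h>:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ioctl.h>

#define VHOST_VIRTIO 0xAF                       /* as in <linux/vhost.h> */
#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)

int main(void)
{
        int fd = open("/dev/vhost-scsi", O_RDWR);   /* assumed node name */
        int abi = 0;

        if (fd < 0) {
                perror("open /dev/vhost-scsi");
                return 1;
        }
        if (ioctl(fd, VHOST_SCSI_GET_ABI_VERSION, &abi) < 0) {
                perror("VHOST_SCSI_GET_ABI_VERSION");
                close(fd);
                return 1;
        }
        printf("vhost-scsi ABI version: %d\n", abi);
        close(fd);
        return 0;
}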
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 2e471c22abf5..88e92041d8f0 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -372,8 +372,15 @@ static void fb_flashcursor(struct work_struct *work)
372 struct vc_data *vc = NULL; 372 struct vc_data *vc = NULL;
373 int c; 373 int c;
374 int mode; 374 int mode;
375 int ret;
376
 377	/* FIXME: we should sort out the unbind locking instead */
 378	/* for now, just skip flashing the cursor if we can't get
 379	 * the lock, rather than blocking fbcon deinit */
380 ret = console_trylock();
381 if (ret == 0)
382 return;
375 383
376 console_lock();
377 if (ops && ops->currcon != -1) 384 if (ops && ops->currcon != -1)
378 vc = vc_cons[ops->currcon].d; 385 vc = vc_cons[ops->currcon].d;
379 386
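The fbcon change above swaps console_lock() for console_trylock() in the cursor-flash worker: blinking the cursor is cosmetic, so skipping one blink is preferable to blocking against console unbind. A minimal pthread sketch of the same try-lock-or-skip pattern (console_lock here is just an illustrative mutex name, not the kernel's console lock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;

/* Periodic, purely cosmetic work: rather than waiting while another
 * thread tears the console down, skip this round if the lock is busy. */
static void flash_cursor(void)
{
        if (pthread_mutex_trylock(&console_lock) != 0)
                return;                 /* busy: silently try again later */

        puts("cursor toggled");
        pthread_mutex_unlock(&console_lock);
}

int main(void)
{
        flash_cursor();
        return 0;
}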
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 3fe82d0e8caa..5b06d31ab6a9 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -166,18 +166,17 @@ static long booke_wdt_ioctl(struct file *file,
166 166
167 switch (cmd) { 167 switch (cmd) {
168 case WDIOC_GETSUPPORT: 168 case WDIOC_GETSUPPORT:
169 if (copy_to_user((void *)arg, &ident, sizeof(ident))) 169 return copy_to_user(p, &ident, sizeof(ident)) ? -EFAULT : 0;
170 return -EFAULT;
171 case WDIOC_GETSTATUS: 170 case WDIOC_GETSTATUS:
172 return put_user(0, p); 171 return put_user(0, p);
173 case WDIOC_GETBOOTSTATUS: 172 case WDIOC_GETBOOTSTATUS:
174 /* XXX: something is clearing TSR */ 173 /* XXX: something is clearing TSR */
175 tmp = mfspr(SPRN_TSR) & TSR_WRS(3); 174 tmp = mfspr(SPRN_TSR) & TSR_WRS(3);
176 /* returns CARDRESET if last reset was caused by the WDT */ 175 /* returns CARDRESET if last reset was caused by the WDT */
177 return (tmp ? WDIOF_CARDRESET : 0); 176 return put_user((tmp ? WDIOF_CARDRESET : 0), p);
178 case WDIOC_SETOPTIONS: 177 case WDIOC_SETOPTIONS:
179 if (get_user(tmp, p)) 178 if (get_user(tmp, p))
180 return -EINVAL; 179 return -EFAULT;
181 if (tmp == WDIOS_ENABLECARD) { 180 if (tmp == WDIOS_ENABLECARD) {
182 booke_wdt_ping(); 181 booke_wdt_ping();
183 break; 182 break;
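The booke_wdt fixes above make WDIOC_GETSUPPORT return instead of falling through, report WDIOC_GETBOOTSTATUS via put_user() rather than as the ioctl return value, and use -EFAULT for a failed get_user(). A hedged userspace sketch of reading the boot status through the generic watchdog API; note that opening /dev/watchdog typically arms the timer, hence the magic-character close:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
        int fd = open("/dev/watchdog", O_WRONLY);
        int boot_status = 0;

        if (fd < 0) {
                perror("open /dev/watchdog");
                return 1;
        }
        /* with the fix above, the value arrives via put_user() instead of
         * being the ioctl's return value */
        if (ioctl(fd, WDIOC_GETBOOTSTATUS, &boot_status) < 0)
                perror("WDIOC_GETBOOTSTATUS");
        else if (boot_status & WDIOF_CARDRESET)
                puts("last reset was caused by the watchdog");
        else
                puts("normal boot");

        /* orderly close: ask the driver to stop the timer (unless nowayout) */
        if (write(fd, "V", 1) != 1)
                perror("magic close");
        close(fd);
        return 0;
}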
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index 3f75129eb0a9..f7abbaeebcaf 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -21,7 +21,6 @@
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/jiffies.h> 23#include <linux/jiffies.h>
24#include <linux/delay.h>
25 24
26#include <linux/mfd/da9052/reg.h> 25#include <linux/mfd/da9052/reg.h>
27#include <linux/mfd/da9052/da9052.h> 26#include <linux/mfd/da9052/da9052.h>
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index d4c50d63acbc..97ca359ae2bd 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -101,19 +101,6 @@ static int platform_pci_resume(struct pci_dev *pdev)
101 return 0; 101 return 0;
102} 102}
103 103
104static void __devinit prepare_shared_info(void)
105{
106#ifdef CONFIG_KEXEC
107 unsigned long addr;
108 struct shared_info *hvm_shared_info;
109
110 addr = alloc_xen_mmio(PAGE_SIZE);
111 hvm_shared_info = ioremap(addr, PAGE_SIZE);
112 memset(hvm_shared_info, 0, PAGE_SIZE);
113 xen_hvm_prepare_kexec(hvm_shared_info, addr >> PAGE_SHIFT);
114#endif
115}
116
117static int __devinit platform_pci_init(struct pci_dev *pdev, 104static int __devinit platform_pci_init(struct pci_dev *pdev,
118 const struct pci_device_id *ent) 105 const struct pci_device_id *ent)
119{ 106{
@@ -151,8 +138,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
151 platform_mmio = mmio_addr; 138 platform_mmio = mmio_addr;
152 platform_mmiolen = mmio_len; 139 platform_mmiolen = mmio_len;
153 140
154 prepare_shared_info();
155
156 if (!xen_have_vector_callback) { 141 if (!xen_have_vector_callback) {
157 ret = xen_allocate_irq(pdev); 142 ret = xen_allocate_irq(pdev);
158 if (ret) { 143 if (ret) {
diff --git a/fs/bio.c b/fs/bio.c
index 5eaa70c9d96e..71072ab99128 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -73,7 +73,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
73{ 73{
74 unsigned int sz = sizeof(struct bio) + extra_size; 74 unsigned int sz = sizeof(struct bio) + extra_size;
75 struct kmem_cache *slab = NULL; 75 struct kmem_cache *slab = NULL;
76 struct bio_slab *bslab; 76 struct bio_slab *bslab, *new_bio_slabs;
77 unsigned int i, entry = -1; 77 unsigned int i, entry = -1;
78 78
79 mutex_lock(&bio_slab_lock); 79 mutex_lock(&bio_slab_lock);
@@ -97,11 +97,12 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
97 97
98 if (bio_slab_nr == bio_slab_max && entry == -1) { 98 if (bio_slab_nr == bio_slab_max && entry == -1) {
99 bio_slab_max <<= 1; 99 bio_slab_max <<= 1;
100 bio_slabs = krealloc(bio_slabs, 100 new_bio_slabs = krealloc(bio_slabs,
101 bio_slab_max * sizeof(struct bio_slab), 101 bio_slab_max * sizeof(struct bio_slab),
102 GFP_KERNEL); 102 GFP_KERNEL);
103 if (!bio_slabs) 103 if (!new_bio_slabs)
104 goto out_unlock; 104 goto out_unlock;
105 bio_slabs = new_bio_slabs;
105 } 106 }
106 if (entry == -1) 107 if (entry == -1)
107 entry = bio_slab_nr++; 108 entry = bio_slab_nr++;
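The fs/bio.c fix above stores the krealloc() result in new_bio_slabs first, so a failed reallocation no longer overwrites, and thereby leaks, the still-valid bio_slabs array. The same rule applies to plain realloc(); a small self-contained example:

#include <stdio.h>
#include <stdlib.h>

/* Grow a buffer without losing the original allocation on failure:
 * assign to a temporary first, exactly like the new_bio_slabs fix above. */
static int grow(char **buf, size_t *cap)
{
        size_t new_cap = *cap ? *cap * 2 : 16;
        char *tmp = realloc(*buf, new_cap);

        if (!tmp)
                return -1;      /* *buf is still valid and still owned */
        *buf = tmp;
        *cap = new_cap;
        return 0;
}

int main(void)
{
        char *buf = NULL;
        size_t cap = 0;

        if (grow(&buf, &cap) == 0)
                printf("capacity is now %zu\n", cap);
        free(buf);
        return 0;
}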
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1e519195d45b..38e721b35d45 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1578,10 +1578,12 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
1578 unsigned long nr_segs, loff_t pos) 1578 unsigned long nr_segs, loff_t pos)
1579{ 1579{
1580 struct file *file = iocb->ki_filp; 1580 struct file *file = iocb->ki_filp;
1581 struct blk_plug plug;
1581 ssize_t ret; 1582 ssize_t ret;
1582 1583
1583 BUG_ON(iocb->ki_pos != pos); 1584 BUG_ON(iocb->ki_pos != pos);
1584 1585
1586 blk_start_plug(&plug);
1585 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); 1587 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
1586 if (ret > 0 || ret == -EIOCBQUEUED) { 1588 if (ret > 0 || ret == -EIOCBQUEUED) {
1587 ssize_t err; 1589 ssize_t err;
@@ -1590,6 +1592,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
1590 if (err < 0 && ret > 0) 1592 if (err < 0 && ret > 0)
1591 ret = err; 1593 ret = err;
1592 } 1594 }
1595 blk_finish_plug(&plug);
1593 return ret; 1596 return ret;
1594} 1597}
1595EXPORT_SYMBOL_GPL(blkdev_aio_write); 1598EXPORT_SYMBOL_GPL(blkdev_aio_write);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index a256f3b2a845..ff6475f409d6 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1438,10 +1438,10 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
1438 ret = extent_from_logical(fs_info, logical, path, 1438 ret = extent_from_logical(fs_info, logical, path,
1439 &found_key); 1439 &found_key);
1440 btrfs_release_path(path); 1440 btrfs_release_path(path);
1441 if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1442 ret = -EINVAL;
1443 if (ret < 0) 1441 if (ret < 0)
1444 return ret; 1442 return ret;
1443 if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1444 return -EINVAL;
1445 1445
1446 extent_item_pos = logical - found_key.objectid; 1446 extent_item_pos = logical - found_key.objectid;
1447 ret = iterate_extent_inodes(fs_info, found_key.objectid, 1447 ret = iterate_extent_inodes(fs_info, found_key.objectid,
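The backref.c reorder above checks for a negative return before testing BTRFS_EXTENT_FLAG_TREE_BLOCK: a negative errno has plenty of bits set, so testing the flag first could turn a real error such as -ENOENT into -EINVAL. A short illustration with an invented lookup() helper and flag:

#include <errno.h>
#include <stdio.h>

#define FLAG_TREE_BLOCK 0x2     /* illustrative flag bit */

/* returns flag bits on success or a negative errno on failure */
static int lookup(int fail)
{
        return fail ? -ENOENT : FLAG_TREE_BLOCK;
}

int main(void)
{
        int ret = lookup(1);

        /* check for an error before interpreting the value as flags;
         * -ENOENT & FLAG_TREE_BLOCK is nonzero in two's complement, so the
         * old order would have reported the wrong error */
        if (ret < 0) {
                fprintf(stderr, "lookup failed: %d\n", ret);
                return 1;
        }
        if (ret & FLAG_TREE_BLOCK)
                puts("got a tree block");
        return 0;
}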
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 86eff48dab78..43d1c5a3a030 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -818,6 +818,7 @@ static void free_workspace(int type, struct list_head *workspace)
818 btrfs_compress_op[idx]->free_workspace(workspace); 818 btrfs_compress_op[idx]->free_workspace(workspace);
819 atomic_dec(alloc_workspace); 819 atomic_dec(alloc_workspace);
820wake: 820wake:
821 smp_mb();
821 if (waitqueue_active(workspace_wait)) 822 if (waitqueue_active(workspace_wait))
822 wake_up(workspace_wait); 823 wake_up(workspace_wait);
823} 824}
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 9d7621f271ff..6d183f60d63a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -421,12 +421,6 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
421 spin_unlock(&fs_info->tree_mod_seq_lock); 421 spin_unlock(&fs_info->tree_mod_seq_lock);
422 422
423 /* 423 /*
424 * we removed the lowest blocker from the blocker list, so there may be
425 * more processible delayed refs.
426 */
427 wake_up(&fs_info->tree_mod_seq_wait);
428
429 /*
430 * anything that's lower than the lowest existing (read: blocked) 424 * anything that's lower than the lowest existing (read: blocked)
431 * sequence number can be removed from the tree. 425 * sequence number can be removed from the tree.
432 */ 426 */
@@ -631,6 +625,9 @@ __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
631 u32 nritems; 625 u32 nritems;
632 int ret; 626 int ret;
633 627
628 if (btrfs_header_level(eb) == 0)
629 return;
630
634 nritems = btrfs_header_nritems(eb); 631 nritems = btrfs_header_nritems(eb);
635 for (i = nritems - 1; i >= 0; i--) { 632 for (i = nritems - 1; i >= 0; i--) {
636 ret = tree_mod_log_insert_key_locked(fs_info, eb, i, 633 ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 4bab807227ad..0d195b507660 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1252,7 +1252,6 @@ struct btrfs_fs_info {
1252 atomic_t tree_mod_seq; 1252 atomic_t tree_mod_seq;
1253 struct list_head tree_mod_seq_list; 1253 struct list_head tree_mod_seq_list;
1254 struct seq_list tree_mod_seq_elem; 1254 struct seq_list tree_mod_seq_elem;
1255 wait_queue_head_t tree_mod_seq_wait;
1256 1255
1257 /* this protects tree_mod_log */ 1256 /* this protects tree_mod_log */
1258 rwlock_t tree_mod_log_lock; 1257 rwlock_t tree_mod_log_lock;
@@ -3192,7 +3191,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
3192int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, 3191int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
3193 struct bio *bio, u32 *dst); 3192 struct bio *bio, u32 *dst);
3194int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, 3193int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
3195 struct bio *bio, u64 logical_offset, u32 *dst); 3194 struct bio *bio, u64 logical_offset);
3196int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, 3195int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
3197 struct btrfs_root *root, 3196 struct btrfs_root *root,
3198 u64 objectid, u64 pos, 3197 u64 objectid, u64 pos,
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 335605c8ceab..07d5eeb1e6f1 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -512,8 +512,8 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
512 512
513 rb_erase(&delayed_item->rb_node, root); 513 rb_erase(&delayed_item->rb_node, root);
514 delayed_item->delayed_node->count--; 514 delayed_item->delayed_node->count--;
515 atomic_dec(&delayed_root->items); 515 if (atomic_dec_return(&delayed_root->items) <
516 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && 516 BTRFS_DELAYED_BACKGROUND &&
517 waitqueue_active(&delayed_root->wait)) 517 waitqueue_active(&delayed_root->wait))
518 wake_up(&delayed_root->wait); 518 wake_up(&delayed_root->wait);
519} 519}
@@ -1028,9 +1028,10 @@ do_again:
1028 btrfs_release_delayed_item(prev); 1028 btrfs_release_delayed_item(prev);
1029 ret = 0; 1029 ret = 0;
1030 btrfs_release_path(path); 1030 btrfs_release_path(path);
1031 if (curr) 1031 if (curr) {
1032 mutex_unlock(&node->mutex);
1032 goto do_again; 1033 goto do_again;
1033 else 1034 } else
1034 goto delete_fail; 1035 goto delete_fail;
1035 } 1036 }
1036 1037
@@ -1055,8 +1056,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
1055 delayed_node->count--; 1056 delayed_node->count--;
1056 1057
1057 delayed_root = delayed_node->root->fs_info->delayed_root; 1058 delayed_root = delayed_node->root->fs_info->delayed_root;
1058 atomic_dec(&delayed_root->items); 1059 if (atomic_dec_return(&delayed_root->items) <
1059 if (atomic_read(&delayed_root->items) <
1060 BTRFS_DELAYED_BACKGROUND && 1060 BTRFS_DELAYED_BACKGROUND &&
1061 waitqueue_active(&delayed_root->wait)) 1061 waitqueue_active(&delayed_root->wait))
1062 wake_up(&delayed_root->wait); 1062 wake_up(&delayed_root->wait);
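The delayed-inode.c hunks above replace atomic_dec() followed by a separate atomic_read() with a single atomic_dec_return(), so the threshold test uses exactly the value this CPU produced instead of a value that another CPU may already have changed. A userspace analogue with C11 atomics; the names and threshold are illustrative only:

#include <stdatomic.h>
#include <stdio.h>

#define BACKGROUND_THRESHOLD 8

static atomic_int items;

static void maybe_wake_worker(void)
{
        puts("wake background worker");
}

static void release_item(void)
{
        /* atomic_fetch_sub() returns the value before the decrement, so
         * "old - 1" is the count this caller just produced; using it keeps
         * the decrement and the threshold test a single atomic step, like
         * the atomic_dec_return() conversion above */
        int now = atomic_fetch_sub(&items, 1) - 1;

        if (now < BACKGROUND_THRESHOLD)
                maybe_wake_worker();
}

int main(void)
{
        atomic_store(&items, BACKGROUND_THRESHOLD);
        release_item();                   /* drops below threshold: wakes */
        return 0;
}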
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index da7419ed01bb..ae9411773397 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -38,17 +38,14 @@
38static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2, 38static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
39 struct btrfs_delayed_tree_ref *ref1) 39 struct btrfs_delayed_tree_ref *ref1)
40{ 40{
41 if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) { 41 if (ref1->root < ref2->root)
42 if (ref1->root < ref2->root) 42 return -1;
43 return -1; 43 if (ref1->root > ref2->root)
44 if (ref1->root > ref2->root) 44 return 1;
45 return 1; 45 if (ref1->parent < ref2->parent)
46 } else { 46 return -1;
47 if (ref1->parent < ref2->parent) 47 if (ref1->parent > ref2->parent)
48 return -1; 48 return 1;
49 if (ref1->parent > ref2->parent)
50 return 1;
51 }
52 return 0; 49 return 0;
53} 50}
54 51
@@ -85,7 +82,8 @@ static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
85 * type of the delayed backrefs and content of delayed backrefs. 82 * type of the delayed backrefs and content of delayed backrefs.
86 */ 83 */
87static int comp_entry(struct btrfs_delayed_ref_node *ref2, 84static int comp_entry(struct btrfs_delayed_ref_node *ref2,
88 struct btrfs_delayed_ref_node *ref1) 85 struct btrfs_delayed_ref_node *ref1,
86 bool compare_seq)
89{ 87{
90 if (ref1->bytenr < ref2->bytenr) 88 if (ref1->bytenr < ref2->bytenr)
91 return -1; 89 return -1;
@@ -102,10 +100,12 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
102 if (ref1->type > ref2->type) 100 if (ref1->type > ref2->type)
103 return 1; 101 return 1;
104 /* merging of sequenced refs is not allowed */ 102 /* merging of sequenced refs is not allowed */
105 if (ref1->seq < ref2->seq) 103 if (compare_seq) {
106 return -1; 104 if (ref1->seq < ref2->seq)
107 if (ref1->seq > ref2->seq) 105 return -1;
108 return 1; 106 if (ref1->seq > ref2->seq)
107 return 1;
108 }
109 if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY || 109 if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
110 ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) { 110 ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
111 return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2), 111 return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
@@ -139,7 +139,7 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
139 entry = rb_entry(parent_node, struct btrfs_delayed_ref_node, 139 entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
140 rb_node); 140 rb_node);
141 141
142 cmp = comp_entry(entry, ins); 142 cmp = comp_entry(entry, ins, 1);
143 if (cmp < 0) 143 if (cmp < 0)
144 p = &(*p)->rb_left; 144 p = &(*p)->rb_left;
145 else if (cmp > 0) 145 else if (cmp > 0)
@@ -233,6 +233,114 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
233 return 0; 233 return 0;
234} 234}
235 235
236static void inline drop_delayed_ref(struct btrfs_trans_handle *trans,
237 struct btrfs_delayed_ref_root *delayed_refs,
238 struct btrfs_delayed_ref_node *ref)
239{
240 rb_erase(&ref->rb_node, &delayed_refs->root);
241 ref->in_tree = 0;
242 btrfs_put_delayed_ref(ref);
243 delayed_refs->num_entries--;
244 if (trans->delayed_ref_updates)
245 trans->delayed_ref_updates--;
246}
247
248static int merge_ref(struct btrfs_trans_handle *trans,
249 struct btrfs_delayed_ref_root *delayed_refs,
250 struct btrfs_delayed_ref_node *ref, u64 seq)
251{
252 struct rb_node *node;
253 int merged = 0;
254 int mod = 0;
255 int done = 0;
256
257 node = rb_prev(&ref->rb_node);
258 while (node) {
259 struct btrfs_delayed_ref_node *next;
260
261 next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
262 node = rb_prev(node);
263 if (next->bytenr != ref->bytenr)
264 break;
265 if (seq && next->seq >= seq)
266 break;
267 if (comp_entry(ref, next, 0))
268 continue;
269
270 if (ref->action == next->action) {
271 mod = next->ref_mod;
272 } else {
273 if (ref->ref_mod < next->ref_mod) {
274 struct btrfs_delayed_ref_node *tmp;
275
276 tmp = ref;
277 ref = next;
278 next = tmp;
279 done = 1;
280 }
281 mod = -next->ref_mod;
282 }
283
284 merged++;
285 drop_delayed_ref(trans, delayed_refs, next);
286 ref->ref_mod += mod;
287 if (ref->ref_mod == 0) {
288 drop_delayed_ref(trans, delayed_refs, ref);
289 break;
290 } else {
291 /*
292 * You can't have multiples of the same ref on a tree
293 * block.
294 */
295 WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
296 ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
297 }
298
299 if (done)
300 break;
301 node = rb_prev(&ref->rb_node);
302 }
303
304 return merged;
305}
306
307void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
308 struct btrfs_fs_info *fs_info,
309 struct btrfs_delayed_ref_root *delayed_refs,
310 struct btrfs_delayed_ref_head *head)
311{
312 struct rb_node *node;
313 u64 seq = 0;
314
315 spin_lock(&fs_info->tree_mod_seq_lock);
316 if (!list_empty(&fs_info->tree_mod_seq_list)) {
317 struct seq_list *elem;
318
319 elem = list_first_entry(&fs_info->tree_mod_seq_list,
320 struct seq_list, list);
321 seq = elem->seq;
322 }
323 spin_unlock(&fs_info->tree_mod_seq_lock);
324
325 node = rb_prev(&head->node.rb_node);
326 while (node) {
327 struct btrfs_delayed_ref_node *ref;
328
329 ref = rb_entry(node, struct btrfs_delayed_ref_node,
330 rb_node);
331 if (ref->bytenr != head->node.bytenr)
332 break;
333
334 /* We can't merge refs that are outside of our seq count */
335 if (seq && ref->seq >= seq)
336 break;
337 if (merge_ref(trans, delayed_refs, ref, seq))
338 node = rb_prev(&head->node.rb_node);
339 else
340 node = rb_prev(node);
341 }
342}
343
236int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, 344int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
237 struct btrfs_delayed_ref_root *delayed_refs, 345 struct btrfs_delayed_ref_root *delayed_refs,
238 u64 seq) 346 u64 seq)
@@ -336,18 +444,11 @@ update_existing_ref(struct btrfs_trans_handle *trans,
336 	 * ever changing the extent allocation tree. 444 	 * ever changing the extent allocation tree.
337 */ 445 */
338 existing->ref_mod--; 446 existing->ref_mod--;
339 if (existing->ref_mod == 0) { 447 if (existing->ref_mod == 0)
340 rb_erase(&existing->rb_node, 448 drop_delayed_ref(trans, delayed_refs, existing);
341 &delayed_refs->root); 449 else
342 existing->in_tree = 0;
343 btrfs_put_delayed_ref(existing);
344 delayed_refs->num_entries--;
345 if (trans->delayed_ref_updates)
346 trans->delayed_ref_updates--;
347 } else {
348 WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || 450 WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
349 existing->type == BTRFS_SHARED_BLOCK_REF_KEY); 451 existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
350 }
351 } else { 452 } else {
352 WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || 453 WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
353 existing->type == BTRFS_SHARED_BLOCK_REF_KEY); 454 existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
@@ -662,9 +763,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
662 add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr, 763 add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
663 num_bytes, parent, ref_root, level, action, 764 num_bytes, parent, ref_root, level, action,
664 for_cow); 765 for_cow);
665 if (!need_ref_seq(for_cow, ref_root) &&
666 waitqueue_active(&fs_info->tree_mod_seq_wait))
667 wake_up(&fs_info->tree_mod_seq_wait);
668 spin_unlock(&delayed_refs->lock); 766 spin_unlock(&delayed_refs->lock);
669 if (need_ref_seq(for_cow, ref_root)) 767 if (need_ref_seq(for_cow, ref_root))
670 btrfs_qgroup_record_ref(trans, &ref->node, extent_op); 768 btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
@@ -713,9 +811,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
713 add_delayed_data_ref(fs_info, trans, &ref->node, bytenr, 811 add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
714 num_bytes, parent, ref_root, owner, offset, 812 num_bytes, parent, ref_root, owner, offset,
715 action, for_cow); 813 action, for_cow);
716 if (!need_ref_seq(for_cow, ref_root) &&
717 waitqueue_active(&fs_info->tree_mod_seq_wait))
718 wake_up(&fs_info->tree_mod_seq_wait);
719 spin_unlock(&delayed_refs->lock); 814 spin_unlock(&delayed_refs->lock);
720 if (need_ref_seq(for_cow, ref_root)) 815 if (need_ref_seq(for_cow, ref_root))
721 btrfs_qgroup_record_ref(trans, &ref->node, extent_op); 816 btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
@@ -744,8 +839,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
744 num_bytes, BTRFS_UPDATE_DELAYED_HEAD, 839 num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
745 extent_op->is_data); 840 extent_op->is_data);
746 841
747 if (waitqueue_active(&fs_info->tree_mod_seq_wait))
748 wake_up(&fs_info->tree_mod_seq_wait);
749 spin_unlock(&delayed_refs->lock); 842 spin_unlock(&delayed_refs->lock);
750 return 0; 843 return 0;
751} 844}
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 0d7c90c366b6..ab5300595847 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -167,6 +167,10 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
167 struct btrfs_trans_handle *trans, 167 struct btrfs_trans_handle *trans,
168 u64 bytenr, u64 num_bytes, 168 u64 bytenr, u64 num_bytes,
169 struct btrfs_delayed_extent_op *extent_op); 169 struct btrfs_delayed_extent_op *extent_op);
170void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
171 struct btrfs_fs_info *fs_info,
172 struct btrfs_delayed_ref_root *delayed_refs,
173 struct btrfs_delayed_ref_head *head);
170 174
171struct btrfs_delayed_ref_head * 175struct btrfs_delayed_ref_head *
172btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); 176btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 62e0cafd6e25..22e98e04c2ea 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -377,9 +377,13 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
377 ret = read_extent_buffer_pages(io_tree, eb, start, 377 ret = read_extent_buffer_pages(io_tree, eb, start,
378 WAIT_COMPLETE, 378 WAIT_COMPLETE,
379 btree_get_extent, mirror_num); 379 btree_get_extent, mirror_num);
380 if (!ret && !verify_parent_transid(io_tree, eb, 380 if (!ret) {
381 if (!verify_parent_transid(io_tree, eb,
381 parent_transid, 0)) 382 parent_transid, 0))
382 break; 383 break;
384 else
385 ret = -EIO;
386 }
383 387
384 /* 388 /*
385 * This buffer's crc is fine, but its contents are corrupted, so 389 * This buffer's crc is fine, but its contents are corrupted, so
@@ -754,9 +758,7 @@ static void run_one_async_done(struct btrfs_work *work)
754 limit = btrfs_async_submit_limit(fs_info); 758 limit = btrfs_async_submit_limit(fs_info);
755 limit = limit * 2 / 3; 759 limit = limit * 2 / 3;
756 760
757 atomic_dec(&fs_info->nr_async_submits); 761 if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
758
759 if (atomic_read(&fs_info->nr_async_submits) < limit &&
760 waitqueue_active(&fs_info->async_submit_wait)) 762 waitqueue_active(&fs_info->async_submit_wait))
761 wake_up(&fs_info->async_submit_wait); 763 wake_up(&fs_info->async_submit_wait);
762 764
@@ -2032,8 +2034,6 @@ int open_ctree(struct super_block *sb,
2032 fs_info->free_chunk_space = 0; 2034 fs_info->free_chunk_space = 0;
2033 fs_info->tree_mod_log = RB_ROOT; 2035 fs_info->tree_mod_log = RB_ROOT;
2034 2036
2035 init_waitqueue_head(&fs_info->tree_mod_seq_wait);
2036
2037 /* readahead state */ 2037 /* readahead state */
2038 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); 2038 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
2039 spin_lock_init(&fs_info->reada_lock); 2039 spin_lock_init(&fs_info->reada_lock);
@@ -2528,8 +2528,7 @@ retry_root_backup:
2528 goto fail_trans_kthread; 2528 goto fail_trans_kthread;
2529 2529
2530 /* do not make disk changes in broken FS */ 2530 /* do not make disk changes in broken FS */
2531 if (btrfs_super_log_root(disk_super) != 0 && 2531 if (btrfs_super_log_root(disk_super) != 0) {
2532 !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
2533 u64 bytenr = btrfs_super_log_root(disk_super); 2532 u64 bytenr = btrfs_super_log_root(disk_super);
2534 2533
2535 if (fs_devices->rw_devices == 0) { 2534 if (fs_devices->rw_devices == 0) {
@@ -3189,30 +3188,14 @@ int close_ctree(struct btrfs_root *root)
3189 /* clear out the rbtree of defraggable inodes */ 3188 /* clear out the rbtree of defraggable inodes */
3190 btrfs_run_defrag_inodes(fs_info); 3189 btrfs_run_defrag_inodes(fs_info);
3191 3190
3192 /*
3193 * Here come 2 situations when btrfs is broken to flip readonly:
3194 *
3195 * 1. when btrfs flips readonly somewhere else before
3196 * btrfs_commit_super, sb->s_flags has MS_RDONLY flag,
3197 * and btrfs will skip to write sb directly to keep
3198 * ERROR state on disk.
3199 *
3200 * 2. when btrfs flips readonly just in btrfs_commit_super,
3201 * and in such case, btrfs cannot write sb via btrfs_commit_super,
3202 * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag,
3203 * btrfs will cleanup all FS resources first and write sb then.
3204 */
3205 if (!(fs_info->sb->s_flags & MS_RDONLY)) { 3191 if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3206 ret = btrfs_commit_super(root); 3192 ret = btrfs_commit_super(root);
3207 if (ret) 3193 if (ret)
3208 printk(KERN_ERR "btrfs: commit super ret %d\n", ret); 3194 printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3209 } 3195 }
3210 3196
3211 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { 3197 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
3212 ret = btrfs_error_commit_super(root); 3198 btrfs_error_commit_super(root);
3213 if (ret)
3214 printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3215 }
3216 3199
3217 btrfs_put_block_group_cache(fs_info); 3200 btrfs_put_block_group_cache(fs_info);
3218 3201
@@ -3434,18 +3417,11 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3434 if (read_only) 3417 if (read_only)
3435 return 0; 3418 return 0;
3436 3419
3437 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
3438 printk(KERN_WARNING "warning: mount fs with errors, "
3439 "running btrfsck is recommended\n");
3440 }
3441
3442 return 0; 3420 return 0;
3443} 3421}
3444 3422
3445int btrfs_error_commit_super(struct btrfs_root *root) 3423void btrfs_error_commit_super(struct btrfs_root *root)
3446{ 3424{
3447 int ret;
3448
3449 mutex_lock(&root->fs_info->cleaner_mutex); 3425 mutex_lock(&root->fs_info->cleaner_mutex);
3450 btrfs_run_delayed_iputs(root); 3426 btrfs_run_delayed_iputs(root);
3451 mutex_unlock(&root->fs_info->cleaner_mutex); 3427 mutex_unlock(&root->fs_info->cleaner_mutex);
@@ -3455,10 +3431,6 @@ int btrfs_error_commit_super(struct btrfs_root *root)
3455 3431
3456 /* cleanup FS via transaction */ 3432 /* cleanup FS via transaction */
3457 btrfs_cleanup_transaction(root); 3433 btrfs_cleanup_transaction(root);
3458
3459 ret = write_ctree_super(NULL, root, 0);
3460
3461 return ret;
3462} 3434}
3463 3435
3464static void btrfs_destroy_ordered_operations(struct btrfs_root *root) 3436static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
@@ -3782,14 +3754,17 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
3782 /* FIXME: cleanup wait for commit */ 3754 /* FIXME: cleanup wait for commit */
3783 t->in_commit = 1; 3755 t->in_commit = 1;
3784 t->blocked = 1; 3756 t->blocked = 1;
3757 smp_mb();
3785 if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) 3758 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3786 wake_up(&root->fs_info->transaction_blocked_wait); 3759 wake_up(&root->fs_info->transaction_blocked_wait);
3787 3760
3788 t->blocked = 0; 3761 t->blocked = 0;
3762 smp_mb();
3789 if (waitqueue_active(&root->fs_info->transaction_wait)) 3763 if (waitqueue_active(&root->fs_info->transaction_wait))
3790 wake_up(&root->fs_info->transaction_wait); 3764 wake_up(&root->fs_info->transaction_wait);
3791 3765
3792 t->commit_done = 1; 3766 t->commit_done = 1;
3767 smp_mb();
3793 if (waitqueue_active(&t->commit_wait)) 3768 if (waitqueue_active(&t->commit_wait))
3794 wake_up(&t->commit_wait); 3769 wake_up(&t->commit_wait);
3795 3770
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 95e147eea239..c5b00a735fef 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -54,7 +54,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
54 struct btrfs_root *root, int max_mirrors); 54 struct btrfs_root *root, int max_mirrors);
55struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); 55struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
56int btrfs_commit_super(struct btrfs_root *root); 56int btrfs_commit_super(struct btrfs_root *root);
57int btrfs_error_commit_super(struct btrfs_root *root); 57void btrfs_error_commit_super(struct btrfs_root *root);
58struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, 58struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
59 u64 bytenr, u32 blocksize); 59 u64 bytenr, u32 blocksize);
60struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, 60struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4e1b153b7c47..ba58024d40d3 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2252,6 +2252,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2252 } 2252 }
2253 2253
2254 /* 2254 /*
2255 * We need to try and merge add/drops of the same ref since we
2256 * can run into issues with relocate dropping the implicit ref
2257 * and then it being added back again before the drop can
2258 * finish. If we merged anything we need to re-loop so we can
2259 * get a good ref.
2260 */
2261 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2262 locked_ref);
2263
2264 /*
2255 * locked_ref is the head node, so we have to go one 2265 * locked_ref is the head node, so we have to go one
2256 * node back for any delayed ref updates 2266 * node back for any delayed ref updates
2257 */ 2267 */
@@ -2318,12 +2328,23 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2318 ref->in_tree = 0; 2328 ref->in_tree = 0;
2319 rb_erase(&ref->rb_node, &delayed_refs->root); 2329 rb_erase(&ref->rb_node, &delayed_refs->root);
2320 delayed_refs->num_entries--; 2330 delayed_refs->num_entries--;
2321 /* 2331 if (locked_ref) {
2322 * we modified num_entries, but as we're currently running 2332 /*
2323 * delayed refs, skip 2333 * when we play the delayed ref, also correct the
2324 * wake_up(&delayed_refs->seq_wait); 2334 * ref_mod on head
2325 * here. 2335 */
2326 */ 2336 switch (ref->action) {
2337 case BTRFS_ADD_DELAYED_REF:
2338 case BTRFS_ADD_DELAYED_EXTENT:
2339 locked_ref->node.ref_mod -= ref->ref_mod;
2340 break;
2341 case BTRFS_DROP_DELAYED_REF:
2342 locked_ref->node.ref_mod += ref->ref_mod;
2343 break;
2344 default:
2345 WARN_ON(1);
2346 }
2347 }
2327 spin_unlock(&delayed_refs->lock); 2348 spin_unlock(&delayed_refs->lock);
2328 2349
2329 ret = run_one_delayed_ref(trans, root, ref, extent_op, 2350 ret = run_one_delayed_ref(trans, root, ref, extent_op,
@@ -2350,22 +2371,6 @@ next:
2350 return count; 2371 return count;
2351} 2372}
2352 2373
2353static void wait_for_more_refs(struct btrfs_fs_info *fs_info,
2354 struct btrfs_delayed_ref_root *delayed_refs,
2355 unsigned long num_refs,
2356 struct list_head *first_seq)
2357{
2358 spin_unlock(&delayed_refs->lock);
2359 pr_debug("waiting for more refs (num %ld, first %p)\n",
2360 num_refs, first_seq);
2361 wait_event(fs_info->tree_mod_seq_wait,
2362 num_refs != delayed_refs->num_entries ||
2363 fs_info->tree_mod_seq_list.next != first_seq);
2364 pr_debug("done waiting for more refs (num %ld, first %p)\n",
2365 delayed_refs->num_entries, fs_info->tree_mod_seq_list.next);
2366 spin_lock(&delayed_refs->lock);
2367}
2368
2369#ifdef SCRAMBLE_DELAYED_REFS 2374#ifdef SCRAMBLE_DELAYED_REFS
2370/* 2375/*
2371 * Normally delayed refs get processed in ascending bytenr order. This 2376 * Normally delayed refs get processed in ascending bytenr order. This
@@ -2460,13 +2465,11 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2460 struct btrfs_delayed_ref_root *delayed_refs; 2465 struct btrfs_delayed_ref_root *delayed_refs;
2461 struct btrfs_delayed_ref_node *ref; 2466 struct btrfs_delayed_ref_node *ref;
2462 struct list_head cluster; 2467 struct list_head cluster;
2463 struct list_head *first_seq = NULL;
2464 int ret; 2468 int ret;
2465 u64 delayed_start; 2469 u64 delayed_start;
2466 int run_all = count == (unsigned long)-1; 2470 int run_all = count == (unsigned long)-1;
2467 int run_most = 0; 2471 int run_most = 0;
2468 unsigned long num_refs = 0; 2472 int loops;
2469 int consider_waiting;
2470 2473
2471 /* We'll clean this up in btrfs_cleanup_transaction */ 2474 /* We'll clean this up in btrfs_cleanup_transaction */
2472 if (trans->aborted) 2475 if (trans->aborted)
@@ -2484,7 +2487,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2484 delayed_refs = &trans->transaction->delayed_refs; 2487 delayed_refs = &trans->transaction->delayed_refs;
2485 INIT_LIST_HEAD(&cluster); 2488 INIT_LIST_HEAD(&cluster);
2486again: 2489again:
2487 consider_waiting = 0; 2490 loops = 0;
2488 spin_lock(&delayed_refs->lock); 2491 spin_lock(&delayed_refs->lock);
2489 2492
2490#ifdef SCRAMBLE_DELAYED_REFS 2493#ifdef SCRAMBLE_DELAYED_REFS
@@ -2512,31 +2515,6 @@ again:
2512 if (ret) 2515 if (ret)
2513 break; 2516 break;
2514 2517
2515 if (delayed_start >= delayed_refs->run_delayed_start) {
2516 if (consider_waiting == 0) {
2517 /*
2518 * btrfs_find_ref_cluster looped. let's do one
2519 * more cycle. if we don't run any delayed ref
2520 * during that cycle (because we can't because
2521 * all of them are blocked) and if the number of
2522 * refs doesn't change, we avoid busy waiting.
2523 */
2524 consider_waiting = 1;
2525 num_refs = delayed_refs->num_entries;
2526 first_seq = root->fs_info->tree_mod_seq_list.next;
2527 } else {
2528 wait_for_more_refs(root->fs_info, delayed_refs,
2529 num_refs, first_seq);
2530 /*
2531 * after waiting, things have changed. we
2532 * dropped the lock and someone else might have
2533 * run some refs, built new clusters and so on.
2534 * therefore, we restart staleness detection.
2535 */
2536 consider_waiting = 0;
2537 }
2538 }
2539
2540 ret = run_clustered_refs(trans, root, &cluster); 2518 ret = run_clustered_refs(trans, root, &cluster);
2541 if (ret < 0) { 2519 if (ret < 0) {
2542 spin_unlock(&delayed_refs->lock); 2520 spin_unlock(&delayed_refs->lock);
@@ -2549,9 +2527,26 @@ again:
2549 if (count == 0) 2527 if (count == 0)
2550 break; 2528 break;
2551 2529
2552 if (ret || delayed_refs->run_delayed_start == 0) { 2530 if (delayed_start >= delayed_refs->run_delayed_start) {
2531 if (loops == 0) {
2532 /*
2533 * btrfs_find_ref_cluster looped. let's do one
2534 * more cycle. if we don't run any delayed ref
 2535 * during that cycle (because all of them
 2536 * are blocked), bail out.
2537 */
2538 loops = 1;
2539 } else {
2540 /*
2541 * no runnable refs left, stop trying
2542 */
2543 BUG_ON(run_all);
2544 break;
2545 }
2546 }
2547 if (ret) {
2553 /* refs were run, let's reset staleness detection */ 2548 /* refs were run, let's reset staleness detection */
2554 consider_waiting = 0; 2549 loops = 0;
2555 } 2550 }
2556 } 2551 }
2557 2552
@@ -3007,17 +3002,16 @@ again:
3007 } 3002 }
3008 spin_unlock(&block_group->lock); 3003 spin_unlock(&block_group->lock);
3009 3004
3010 num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024); 3005 /*
3006 * Try to preallocate enough space based on how big the block group is.
3007 * Keep in mind this has to include any pinned space which could end up
3008 * taking up quite a bit since it's not folded into the other space
3009 * cache.
3010 */
3011 num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3011 if (!num_pages) 3012 if (!num_pages)
3012 num_pages = 1; 3013 num_pages = 1;
3013 3014
3014 /*
3015 * Just to make absolutely sure we have enough space, we're going to
3016 * preallocate 12 pages worth of space for each block group. In
3017 * practice we ought to use at most 8, but we need extra space so we can
3018 * add our header and have a terminator between the extents and the
3019 * bitmaps.
3020 */
3021 num_pages *= 16; 3015 num_pages *= 16;
3022 num_pages *= PAGE_CACHE_SIZE; 3016 num_pages *= PAGE_CACHE_SIZE;
3023 3017
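
The sizing change above makes the space-cache preallocation scale with the block group: the group size is divided by 256MiB (rather than 1GiB), clamped to at least one, then multiplied by 16 pages. For a 1GiB block group that is 4 * 16 = 64 pages, i.e. 256KiB with 4KiB pages, four times what the old divisor yielded. A small helper expressing the same rule (an illustrative sketch, not a function in the patch):

#include <linux/math64.h>

/* sketch: bytes preallocated for the free-space cache of one block group */
static u64 demo_cache_prealloc_bytes(u64 group_size, u64 page_size)
{
        u64 num_pages = div64_u64(group_size, 256 * 1024 * 1024);

        if (!num_pages)
                num_pages = 1;
        return num_pages * 16 * page_size;
}
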
@@ -4571,8 +4565,10 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4571 if (root->fs_info->quota_enabled) { 4565 if (root->fs_info->quota_enabled) {
4572 ret = btrfs_qgroup_reserve(root, num_bytes + 4566 ret = btrfs_qgroup_reserve(root, num_bytes +
4573 nr_extents * root->leafsize); 4567 nr_extents * root->leafsize);
4574 if (ret) 4568 if (ret) {
4569 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4575 return ret; 4570 return ret;
4571 }
4576 } 4572 }
4577 4573
4578 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); 4574 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
@@ -5294,9 +5290,6 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5294 rb_erase(&head->node.rb_node, &delayed_refs->root); 5290 rb_erase(&head->node.rb_node, &delayed_refs->root);
5295 5291
5296 delayed_refs->num_entries--; 5292 delayed_refs->num_entries--;
5297 smp_mb();
5298 if (waitqueue_active(&root->fs_info->tree_mod_seq_wait))
5299 wake_up(&root->fs_info->tree_mod_seq_wait);
5300 5293
5301 /* 5294 /*
5302 * we don't take a ref on the node because we're removing it from the 5295 * we don't take a ref on the node because we're removing it from the
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 45c81bb4ac82..4c878476bb91 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2330,23 +2330,10 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2330 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { 2330 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2331 ret = tree->ops->readpage_end_io_hook(page, start, end, 2331 ret = tree->ops->readpage_end_io_hook(page, start, end,
2332 state, mirror); 2332 state, mirror);
2333 if (ret) { 2333 if (ret)
2334 /* no IO indicated but software detected errors
2335 * in the block, either checksum errors or
2336 * issues with the contents */
2337 struct btrfs_root *root =
2338 BTRFS_I(page->mapping->host)->root;
2339 struct btrfs_device *device;
2340
2341 uptodate = 0; 2334 uptodate = 0;
2342 device = btrfs_find_device_for_logical( 2335 else
2343 root, start, mirror);
2344 if (device)
2345 btrfs_dev_stat_inc_and_print(device,
2346 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2347 } else {
2348 clean_io_failure(start, page); 2336 clean_io_failure(start, page);
2349 }
2350 } 2337 }
2351 2338
2352 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) { 2339 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index b45b9de0c21d..857d93cd01dc 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -272,9 +272,9 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
272} 272}
273 273
274int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, 274int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
275 struct bio *bio, u64 offset, u32 *dst) 275 struct bio *bio, u64 offset)
276{ 276{
277 return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1); 277 return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
278} 278}
279 279
280int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, 280int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 6e8f416773d4..ec154f954646 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1008,9 +1008,7 @@ static noinline void async_cow_submit(struct btrfs_work *work)
1008 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> 1008 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
1009 PAGE_CACHE_SHIFT; 1009 PAGE_CACHE_SHIFT;
1010 1010
1011 atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages); 1011 if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
1012
1013 if (atomic_read(&root->fs_info->async_delalloc_pages) <
1014 5 * 1024 * 1024 && 1012 5 * 1024 * 1024 &&
1015 waitqueue_active(&root->fs_info->async_submit_wait)) 1013 waitqueue_active(&root->fs_info->async_submit_wait))
1016 wake_up(&root->fs_info->async_submit_wait); 1014 wake_up(&root->fs_info->async_submit_wait);
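
The async_cow_submit() hunk above folds the decrement and the threshold test into one atomic_sub_return(), so the comparison uses the exact value this CPU produced instead of re-reading a counter that another submitter may already have changed. The two shapes, side by side (an illustrative sketch with made-up names, not the patch itself):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t demo_pages = ATOMIC_INIT(0);
#define DEMO_LIMIT      (5 * 1024 * 1024)

/* racy: demo_pages may change between the sub and the read */
static bool demo_below_limit_racy(int nr)
{
        atomic_sub(nr, &demo_pages);
        return atomic_read(&demo_pages) < DEMO_LIMIT;
}

/* atomic_sub_return() reports the post-decrement value atomically */
static bool demo_below_limit(int nr)
{
        return atomic_sub_return(nr, &demo_pages) < DEMO_LIMIT;
}
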
@@ -1885,8 +1883,11 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
1885 trans = btrfs_join_transaction_nolock(root); 1883 trans = btrfs_join_transaction_nolock(root);
1886 else 1884 else
1887 trans = btrfs_join_transaction(root); 1885 trans = btrfs_join_transaction(root);
1888 if (IS_ERR(trans)) 1886 if (IS_ERR(trans)) {
1889 return PTR_ERR(trans); 1887 ret = PTR_ERR(trans);
1888 trans = NULL;
1889 goto out;
1890 }
1890 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1891 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1891 ret = btrfs_update_inode_fallback(trans, root, inode); 1892 ret = btrfs_update_inode_fallback(trans, root, inode);
1892 if (ret) /* -ENOMEM or corruption */ 1893 if (ret) /* -ENOMEM or corruption */
@@ -3174,7 +3175,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3174 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 3175 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3175 inode_inc_iversion(dir); 3176 inode_inc_iversion(dir);
3176 dir->i_mtime = dir->i_ctime = CURRENT_TIME; 3177 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3177 ret = btrfs_update_inode(trans, root, dir); 3178 ret = btrfs_update_inode_fallback(trans, root, dir);
3178 if (ret) 3179 if (ret)
3179 btrfs_abort_transaction(trans, root, ret); 3180 btrfs_abort_transaction(trans, root, ret);
3180out: 3181out:
@@ -5774,18 +5775,112 @@ out:
5774 return ret; 5775 return ret;
5775} 5776}
5776 5777
5778static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
5779 struct extent_state **cached_state, int writing)
5780{
5781 struct btrfs_ordered_extent *ordered;
5782 int ret = 0;
5783
5784 while (1) {
5785 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5786 0, cached_state);
5787 /*
5788 * We're concerned with the entire range that we're going to be
 5789 * doing DIO to, so we need to make sure there are no ordered
5790 * extents in this range.
5791 */
5792 ordered = btrfs_lookup_ordered_range(inode, lockstart,
5793 lockend - lockstart + 1);
5794
5795 /*
5796 * We need to make sure there are no buffered pages in this
5797 * range either, we could have raced between the invalidate in
5798 * generic_file_direct_write and locking the extent. The
5799 * invalidate needs to happen so that reads after a write do not
5800 * get stale data.
5801 */
5802 if (!ordered && (!writing ||
5803 !test_range_bit(&BTRFS_I(inode)->io_tree,
5804 lockstart, lockend, EXTENT_UPTODATE, 0,
5805 *cached_state)))
5806 break;
5807
5808 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5809 cached_state, GFP_NOFS);
5810
5811 if (ordered) {
5812 btrfs_start_ordered_extent(inode, ordered, 1);
5813 btrfs_put_ordered_extent(ordered);
5814 } else {
5815 /* Screw you mmap */
5816 ret = filemap_write_and_wait_range(inode->i_mapping,
5817 lockstart,
5818 lockend);
5819 if (ret)
5820 break;
5821
5822 /*
5823 * If we found a page that couldn't be invalidated just
5824 * fall back to buffered.
5825 */
5826 ret = invalidate_inode_pages2_range(inode->i_mapping,
5827 lockstart >> PAGE_CACHE_SHIFT,
5828 lockend >> PAGE_CACHE_SHIFT);
5829 if (ret)
5830 break;
5831 }
5832
5833 cond_resched();
5834 }
5835
5836 return ret;
5837}
5838
5777static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, 5839static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5778 struct buffer_head *bh_result, int create) 5840 struct buffer_head *bh_result, int create)
5779{ 5841{
5780 struct extent_map *em; 5842 struct extent_map *em;
5781 struct btrfs_root *root = BTRFS_I(inode)->root; 5843 struct btrfs_root *root = BTRFS_I(inode)->root;
5844 struct extent_state *cached_state = NULL;
5782 u64 start = iblock << inode->i_blkbits; 5845 u64 start = iblock << inode->i_blkbits;
5846 u64 lockstart, lockend;
5783 u64 len = bh_result->b_size; 5847 u64 len = bh_result->b_size;
5784 struct btrfs_trans_handle *trans; 5848 struct btrfs_trans_handle *trans;
5849 int unlock_bits = EXTENT_LOCKED;
5850 int ret;
5851
5852 if (create) {
5853 ret = btrfs_delalloc_reserve_space(inode, len);
5854 if (ret)
5855 return ret;
5856 unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
5857 } else {
5858 len = min_t(u64, len, root->sectorsize);
5859 }
5860
5861 lockstart = start;
5862 lockend = start + len - 1;
5863
5864 /*
5865 * If this errors out it's because we couldn't invalidate pagecache for
 5866 * this range and we need to fall back to buffered.
5867 */
5868 if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
5869 return -ENOTBLK;
5870
5871 if (create) {
5872 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
5873 lockend, EXTENT_DELALLOC, NULL,
5874 &cached_state, GFP_NOFS);
5875 if (ret)
5876 goto unlock_err;
5877 }
5785 5878
5786 em = btrfs_get_extent(inode, NULL, 0, start, len, 0); 5879 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
5787 if (IS_ERR(em)) 5880 if (IS_ERR(em)) {
5788 return PTR_ERR(em); 5881 ret = PTR_ERR(em);
5882 goto unlock_err;
5883 }
5789 5884
5790 /* 5885 /*
5791 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered 5886 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
@@ -5804,17 +5899,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5804 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || 5899 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
5805 em->block_start == EXTENT_MAP_INLINE) { 5900 em->block_start == EXTENT_MAP_INLINE) {
5806 free_extent_map(em); 5901 free_extent_map(em);
5807 return -ENOTBLK; 5902 ret = -ENOTBLK;
5903 goto unlock_err;
5808 } 5904 }
5809 5905
5810 /* Just a good old fashioned hole, return */ 5906 /* Just a good old fashioned hole, return */
5811 if (!create && (em->block_start == EXTENT_MAP_HOLE || 5907 if (!create && (em->block_start == EXTENT_MAP_HOLE ||
5812 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { 5908 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
5813 free_extent_map(em); 5909 free_extent_map(em);
5814 /* DIO will do one hole at a time, so just unlock a sector */ 5910 ret = 0;
5815 unlock_extent(&BTRFS_I(inode)->io_tree, start, 5911 goto unlock_err;
5816 start + root->sectorsize - 1);
5817 return 0;
5818 } 5912 }
5819 5913
5820 /* 5914 /*
@@ -5827,8 +5921,9 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5827 * 5921 *
5828 */ 5922 */
5829 if (!create) { 5923 if (!create) {
5830 len = em->len - (start - em->start); 5924 len = min(len, em->len - (start - em->start));
5831 goto map; 5925 lockstart = start + len;
5926 goto unlock;
5832 } 5927 }
5833 5928
5834 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 5929 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
@@ -5860,7 +5955,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5860 btrfs_end_transaction(trans, root); 5955 btrfs_end_transaction(trans, root);
5861 if (ret) { 5956 if (ret) {
5862 free_extent_map(em); 5957 free_extent_map(em);
5863 return ret; 5958 goto unlock_err;
5864 } 5959 }
5865 goto unlock; 5960 goto unlock;
5866 } 5961 }
@@ -5873,14 +5968,12 @@ must_cow:
5873 */ 5968 */
5874 len = bh_result->b_size; 5969 len = bh_result->b_size;
5875 em = btrfs_new_extent_direct(inode, em, start, len); 5970 em = btrfs_new_extent_direct(inode, em, start, len);
5876 if (IS_ERR(em)) 5971 if (IS_ERR(em)) {
5877 return PTR_ERR(em); 5972 ret = PTR_ERR(em);
5973 goto unlock_err;
5974 }
5878 len = min(len, em->len - (start - em->start)); 5975 len = min(len, em->len - (start - em->start));
5879unlock: 5976unlock:
5880 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
5881 EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
5882 0, NULL, GFP_NOFS);
5883map:
5884 bh_result->b_blocknr = (em->block_start + (start - em->start)) >> 5977 bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
5885 inode->i_blkbits; 5978 inode->i_blkbits;
5886 bh_result->b_size = len; 5979 bh_result->b_size = len;
@@ -5898,9 +5991,44 @@ map:
5898 i_size_write(inode, start + len); 5991 i_size_write(inode, start + len);
5899 } 5992 }
5900 5993
5994 /*
 5995 * In the case of a write we need to clear and unlock the entire range;
 5996 * in the case of a read we only need to unlock the end area that we
 5997 * aren't using, if there is any leftover space.
5998 */
5999 if (lockstart < lockend) {
6000 if (create && len < lockend - lockstart) {
6001 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6002 lockstart + len - 1, unlock_bits, 1, 0,
6003 &cached_state, GFP_NOFS);
6004 /*
 6005 * Besides unlocking, we also need to clean up reserved space
 6006 * for the leftover range by attaching EXTENT_DO_ACCOUNTING.
6007 */
6008 clear_extent_bit(&BTRFS_I(inode)->io_tree,
6009 lockstart + len, lockend,
6010 unlock_bits | EXTENT_DO_ACCOUNTING,
6011 1, 0, NULL, GFP_NOFS);
6012 } else {
6013 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6014 lockend, unlock_bits, 1, 0,
6015 &cached_state, GFP_NOFS);
6016 }
6017 } else {
6018 free_extent_state(cached_state);
6019 }
6020
5901 free_extent_map(em); 6021 free_extent_map(em);
5902 6022
5903 return 0; 6023 return 0;
6024
6025unlock_err:
6026 if (create)
6027 unlock_bits |= EXTENT_DO_ACCOUNTING;
6028
6029 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6030 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
6031 return ret;
5904} 6032}
5905 6033
5906struct btrfs_dio_private { 6034struct btrfs_dio_private {
@@ -5908,7 +6036,6 @@ struct btrfs_dio_private {
5908 u64 logical_offset; 6036 u64 logical_offset;
5909 u64 disk_bytenr; 6037 u64 disk_bytenr;
5910 u64 bytes; 6038 u64 bytes;
5911 u32 *csums;
5912 void *private; 6039 void *private;
5913 6040
5914 /* number of bios pending for this dio */ 6041 /* number of bios pending for this dio */
@@ -5928,7 +6055,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
5928 struct inode *inode = dip->inode; 6055 struct inode *inode = dip->inode;
5929 struct btrfs_root *root = BTRFS_I(inode)->root; 6056 struct btrfs_root *root = BTRFS_I(inode)->root;
5930 u64 start; 6057 u64 start;
5931 u32 *private = dip->csums;
5932 6058
5933 start = dip->logical_offset; 6059 start = dip->logical_offset;
5934 do { 6060 do {
@@ -5936,8 +6062,12 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
5936 struct page *page = bvec->bv_page; 6062 struct page *page = bvec->bv_page;
5937 char *kaddr; 6063 char *kaddr;
5938 u32 csum = ~(u32)0; 6064 u32 csum = ~(u32)0;
6065 u64 private = ~(u32)0;
5939 unsigned long flags; 6066 unsigned long flags;
5940 6067
6068 if (get_state_private(&BTRFS_I(inode)->io_tree,
6069 start, &private))
6070 goto failed;
5941 local_irq_save(flags); 6071 local_irq_save(flags);
5942 kaddr = kmap_atomic(page); 6072 kaddr = kmap_atomic(page);
5943 csum = btrfs_csum_data(root, kaddr + bvec->bv_offset, 6073 csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
@@ -5947,18 +6077,18 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
5947 local_irq_restore(flags); 6077 local_irq_restore(flags);
5948 6078
5949 flush_dcache_page(bvec->bv_page); 6079 flush_dcache_page(bvec->bv_page);
5950 if (csum != *private) { 6080 if (csum != private) {
6081failed:
5951 printk(KERN_ERR "btrfs csum failed ino %llu off" 6082 printk(KERN_ERR "btrfs csum failed ino %llu off"
5952 " %llu csum %u private %u\n", 6083 " %llu csum %u private %u\n",
5953 (unsigned long long)btrfs_ino(inode), 6084 (unsigned long long)btrfs_ino(inode),
5954 (unsigned long long)start, 6085 (unsigned long long)start,
5955 csum, *private); 6086 csum, (unsigned)private);
5956 err = -EIO; 6087 err = -EIO;
5957 } 6088 }
5958 } 6089 }
5959 6090
5960 start += bvec->bv_len; 6091 start += bvec->bv_len;
5961 private++;
5962 bvec++; 6092 bvec++;
5963 } while (bvec <= bvec_end); 6093 } while (bvec <= bvec_end);
5964 6094
@@ -5966,7 +6096,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
5966 dip->logical_offset + dip->bytes - 1); 6096 dip->logical_offset + dip->bytes - 1);
5967 bio->bi_private = dip->private; 6097 bio->bi_private = dip->private;
5968 6098
5969 kfree(dip->csums);
5970 kfree(dip); 6099 kfree(dip);
5971 6100
5972 /* If we had a csum failure make sure to clear the uptodate flag */ 6101 /* If we had a csum failure make sure to clear the uptodate flag */
@@ -6072,7 +6201,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
6072 6201
6073static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, 6202static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
6074 int rw, u64 file_offset, int skip_sum, 6203 int rw, u64 file_offset, int skip_sum,
6075 u32 *csums, int async_submit) 6204 int async_submit)
6076{ 6205{
6077 int write = rw & REQ_WRITE; 6206 int write = rw & REQ_WRITE;
6078 struct btrfs_root *root = BTRFS_I(inode)->root; 6207 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -6105,8 +6234,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
6105 if (ret) 6234 if (ret)
6106 goto err; 6235 goto err;
6107 } else if (!skip_sum) { 6236 } else if (!skip_sum) {
6108 ret = btrfs_lookup_bio_sums_dio(root, inode, bio, 6237 ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
6109 file_offset, csums);
6110 if (ret) 6238 if (ret)
6111 goto err; 6239 goto err;
6112 } 6240 }
@@ -6132,10 +6260,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6132 u64 submit_len = 0; 6260 u64 submit_len = 0;
6133 u64 map_length; 6261 u64 map_length;
6134 int nr_pages = 0; 6262 int nr_pages = 0;
6135 u32 *csums = dip->csums;
6136 int ret = 0; 6263 int ret = 0;
6137 int async_submit = 0; 6264 int async_submit = 0;
6138 int write = rw & REQ_WRITE;
6139 6265
6140 map_length = orig_bio->bi_size; 6266 map_length = orig_bio->bi_size;
6141 ret = btrfs_map_block(map_tree, READ, start_sector << 9, 6267 ret = btrfs_map_block(map_tree, READ, start_sector << 9,
@@ -6171,16 +6297,13 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6171 atomic_inc(&dip->pending_bios); 6297 atomic_inc(&dip->pending_bios);
6172 ret = __btrfs_submit_dio_bio(bio, inode, rw, 6298 ret = __btrfs_submit_dio_bio(bio, inode, rw,
6173 file_offset, skip_sum, 6299 file_offset, skip_sum,
6174 csums, async_submit); 6300 async_submit);
6175 if (ret) { 6301 if (ret) {
6176 bio_put(bio); 6302 bio_put(bio);
6177 atomic_dec(&dip->pending_bios); 6303 atomic_dec(&dip->pending_bios);
6178 goto out_err; 6304 goto out_err;
6179 } 6305 }
6180 6306
6181 /* Write's use the ordered csums */
6182 if (!write && !skip_sum)
6183 csums = csums + nr_pages;
6184 start_sector += submit_len >> 9; 6307 start_sector += submit_len >> 9;
6185 file_offset += submit_len; 6308 file_offset += submit_len;
6186 6309
@@ -6210,7 +6333,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6210 6333
6211submit: 6334submit:
6212 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, 6335 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
6213 csums, async_submit); 6336 async_submit);
6214 if (!ret) 6337 if (!ret)
6215 return 0; 6338 return 0;
6216 6339
@@ -6246,17 +6369,6 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
6246 ret = -ENOMEM; 6369 ret = -ENOMEM;
6247 goto free_ordered; 6370 goto free_ordered;
6248 } 6371 }
6249 dip->csums = NULL;
6250
6251 /* Write's use the ordered csum stuff, so we don't need dip->csums */
6252 if (!write && !skip_sum) {
6253 dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
6254 if (!dip->csums) {
6255 kfree(dip);
6256 ret = -ENOMEM;
6257 goto free_ordered;
6258 }
6259 }
6260 6372
6261 dip->private = bio->bi_private; 6373 dip->private = bio->bi_private;
6262 dip->inode = inode; 6374 dip->inode = inode;
@@ -6341,132 +6453,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
6341out: 6453out:
6342 return retval; 6454 return retval;
6343} 6455}
6456
6344static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, 6457static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
6345 const struct iovec *iov, loff_t offset, 6458 const struct iovec *iov, loff_t offset,
6346 unsigned long nr_segs) 6459 unsigned long nr_segs)
6347{ 6460{
6348 struct file *file = iocb->ki_filp; 6461 struct file *file = iocb->ki_filp;
6349 struct inode *inode = file->f_mapping->host; 6462 struct inode *inode = file->f_mapping->host;
6350 struct btrfs_ordered_extent *ordered;
6351 struct extent_state *cached_state = NULL;
6352 u64 lockstart, lockend;
6353 ssize_t ret;
6354 int writing = rw & WRITE;
6355 int write_bits = 0;
6356 size_t count = iov_length(iov, nr_segs);
6357 6463
6358 if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov, 6464 if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
6359 offset, nr_segs)) { 6465 offset, nr_segs))
6360 return 0; 6466 return 0;
6361 }
6362
6363 lockstart = offset;
6364 lockend = offset + count - 1;
6365
6366 if (writing) {
6367 ret = btrfs_delalloc_reserve_space(inode, count);
6368 if (ret)
6369 goto out;
6370 }
6371
6372 while (1) {
6373 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6374 0, &cached_state);
6375 /*
6376 * We're concerned with the entire range that we're going to be
6377 * doing DIO to, so we need to make sure theres no ordered
6378 * extents in this range.
6379 */
6380 ordered = btrfs_lookup_ordered_range(inode, lockstart,
6381 lockend - lockstart + 1);
6382
6383 /*
6384 * We need to make sure there are no buffered pages in this
6385 * range either, we could have raced between the invalidate in
6386 * generic_file_direct_write and locking the extent. The
6387 * invalidate needs to happen so that reads after a write do not
6388 * get stale data.
6389 */
6390 if (!ordered && (!writing ||
6391 !test_range_bit(&BTRFS_I(inode)->io_tree,
6392 lockstart, lockend, EXTENT_UPTODATE, 0,
6393 cached_state)))
6394 break;
6395
6396 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6397 &cached_state, GFP_NOFS);
6398
6399 if (ordered) {
6400 btrfs_start_ordered_extent(inode, ordered, 1);
6401 btrfs_put_ordered_extent(ordered);
6402 } else {
6403 /* Screw you mmap */
6404 ret = filemap_write_and_wait_range(file->f_mapping,
6405 lockstart,
6406 lockend);
6407 if (ret)
6408 goto out;
6409
6410 /*
6411 * If we found a page that couldn't be invalidated just
6412 * fall back to buffered.
6413 */
6414 ret = invalidate_inode_pages2_range(file->f_mapping,
6415 lockstart >> PAGE_CACHE_SHIFT,
6416 lockend >> PAGE_CACHE_SHIFT);
6417 if (ret) {
6418 if (ret == -EBUSY)
6419 ret = 0;
6420 goto out;
6421 }
6422 }
6423
6424 cond_resched();
6425 }
6426 6467
6427 /* 6468 return __blockdev_direct_IO(rw, iocb, inode,
6428 * we don't use btrfs_set_extent_delalloc because we don't want
6429 * the dirty or uptodate bits
6430 */
6431 if (writing) {
6432 write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
6433 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6434 EXTENT_DELALLOC, NULL, &cached_state,
6435 GFP_NOFS);
6436 if (ret) {
6437 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6438 lockend, EXTENT_LOCKED | write_bits,
6439 1, 0, &cached_state, GFP_NOFS);
6440 goto out;
6441 }
6442 }
6443
6444 free_extent_state(cached_state);
6445 cached_state = NULL;
6446
6447 ret = __blockdev_direct_IO(rw, iocb, inode,
6448 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, 6469 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
6449 iov, offset, nr_segs, btrfs_get_blocks_direct, NULL, 6470 iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
6450 btrfs_submit_direct, 0); 6471 btrfs_submit_direct, 0);
6451
6452 if (ret < 0 && ret != -EIOCBQUEUED) {
6453 clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
6454 offset + iov_length(iov, nr_segs) - 1,
6455 EXTENT_LOCKED | write_bits, 1, 0,
6456 &cached_state, GFP_NOFS);
6457 } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
6458 /*
6459 * We're falling back to buffered, unlock the section we didn't
6460 * do IO on.
6461 */
6462 clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
6463 offset + iov_length(iov, nr_segs) - 1,
6464 EXTENT_LOCKED | write_bits, 1, 0,
6465 &cached_state, GFP_NOFS);
6466 }
6467out:
6468 free_extent_state(cached_state);
6469 return ret;
6470} 6472}
6471 6473
6472static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 6474static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 7bb755677a22..9df50fa8a078 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -424,7 +424,7 @@ static noinline int create_subvol(struct btrfs_root *root,
424 uuid_le_gen(&new_uuid); 424 uuid_le_gen(&new_uuid);
425 memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE); 425 memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE);
426 root_item.otime.sec = cpu_to_le64(cur_time.tv_sec); 426 root_item.otime.sec = cpu_to_le64(cur_time.tv_sec);
427 root_item.otime.nsec = cpu_to_le64(cur_time.tv_nsec); 427 root_item.otime.nsec = cpu_to_le32(cur_time.tv_nsec);
428 root_item.ctime = root_item.otime; 428 root_item.ctime = root_item.otime;
429 btrfs_set_root_ctransid(&root_item, trans->transid); 429 btrfs_set_root_ctransid(&root_item, trans->transid);
430 btrfs_set_root_otransid(&root_item, trans->transid); 430 btrfs_set_root_otransid(&root_item, trans->transid);
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index a44eff074805..2a1762c66041 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -67,7 +67,7 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
67{ 67{
68 if (eb->lock_nested) { 68 if (eb->lock_nested) {
69 read_lock(&eb->lock); 69 read_lock(&eb->lock);
70 if (&eb->lock_nested && current->pid == eb->lock_owner) { 70 if (eb->lock_nested && current->pid == eb->lock_owner) {
71 read_unlock(&eb->lock); 71 read_unlock(&eb->lock);
72 return; 72 return;
73 } 73 }
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index bc424ae5a81a..38b42e7bc91d 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1364,13 +1364,17 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1364 spin_lock(&fs_info->qgroup_lock); 1364 spin_lock(&fs_info->qgroup_lock);
1365 1365
1366 dstgroup = add_qgroup_rb(fs_info, objectid); 1366 dstgroup = add_qgroup_rb(fs_info, objectid);
1367 if (!dstgroup) 1367 if (IS_ERR(dstgroup)) {
1368 ret = PTR_ERR(dstgroup);
1368 goto unlock; 1369 goto unlock;
1370 }
1369 1371
1370 if (srcid) { 1372 if (srcid) {
1371 srcgroup = find_qgroup_rb(fs_info, srcid); 1373 srcgroup = find_qgroup_rb(fs_info, srcid);
1372 if (!srcgroup) 1374 if (!srcgroup) {
1375 ret = -EINVAL;
1373 goto unlock; 1376 goto unlock;
1377 }
1374 dstgroup->rfer = srcgroup->rfer - level_size; 1378 dstgroup->rfer = srcgroup->rfer - level_size;
1375 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size; 1379 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
1376 srcgroup->excl = level_size; 1380 srcgroup->excl = level_size;
@@ -1379,8 +1383,10 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1379 qgroup_dirty(fs_info, srcgroup); 1383 qgroup_dirty(fs_info, srcgroup);
1380 } 1384 }
1381 1385
1382 if (!inherit) 1386 if (!inherit) {
1387 ret = -EINVAL;
1383 goto unlock; 1388 goto unlock;
1389 }
1384 1390
1385 i_qgroups = (u64 *)(inherit + 1); 1391 i_qgroups = (u64 *)(inherit + 1);
1386 for (i = 0; i < inherit->num_qgroups; ++i) { 1392 for (i = 0; i < inherit->num_qgroups; ++i) {
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 6bb465cca20f..10d8e4d88071 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -544,8 +544,8 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
544 struct timespec ct = CURRENT_TIME; 544 struct timespec ct = CURRENT_TIME;
545 545
546 spin_lock(&root->root_times_lock); 546 spin_lock(&root->root_times_lock);
547 item->ctransid = trans->transid; 547 item->ctransid = cpu_to_le64(trans->transid);
548 item->ctime.sec = cpu_to_le64(ct.tv_sec); 548 item->ctime.sec = cpu_to_le64(ct.tv_sec);
549 item->ctime.nsec = cpu_to_le64(ct.tv_nsec); 549 item->ctime.nsec = cpu_to_le32(ct.tv_nsec);
550 spin_unlock(&root->root_times_lock); 550 spin_unlock(&root->root_times_lock);
551} 551}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index f2eb24c477a3..83d6f9f9c220 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -838,7 +838,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
838 struct btrfs_trans_handle *trans; 838 struct btrfs_trans_handle *trans;
839 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 839 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
840 struct btrfs_root *root = fs_info->tree_root; 840 struct btrfs_root *root = fs_info->tree_root;
841 int ret;
842 841
843 trace_btrfs_sync_fs(wait); 842 trace_btrfs_sync_fs(wait);
844 843
@@ -849,11 +848,17 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
849 848
850 btrfs_wait_ordered_extents(root, 0, 0); 849 btrfs_wait_ordered_extents(root, 0, 0);
851 850
852 trans = btrfs_start_transaction(root, 0); 851 spin_lock(&fs_info->trans_lock);
852 if (!fs_info->running_transaction) {
853 spin_unlock(&fs_info->trans_lock);
854 return 0;
855 }
856 spin_unlock(&fs_info->trans_lock);
857
858 trans = btrfs_join_transaction(root);
853 if (IS_ERR(trans)) 859 if (IS_ERR(trans))
854 return PTR_ERR(trans); 860 return PTR_ERR(trans);
855 ret = btrfs_commit_transaction(trans, root); 861 return btrfs_commit_transaction(trans, root);
856 return ret;
857} 862}
858 863
859static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) 864static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
@@ -1530,6 +1535,8 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
1530 while (cur_devices) { 1535 while (cur_devices) {
1531 head = &cur_devices->devices; 1536 head = &cur_devices->devices;
1532 list_for_each_entry(dev, head, dev_list) { 1537 list_for_each_entry(dev, head, dev_list) {
1538 if (dev->missing)
1539 continue;
1533 if (!first_dev || dev->devid < first_dev->devid) 1540 if (!first_dev || dev->devid < first_dev->devid)
1534 first_dev = dev; 1541 first_dev = dev;
1535 } 1542 }
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 17be3dedacba..27c26004e050 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1031,6 +1031,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1031 1031
1032 btrfs_i_size_write(parent_inode, parent_inode->i_size + 1032 btrfs_i_size_write(parent_inode, parent_inode->i_size +
1033 dentry->d_name.len * 2); 1033 dentry->d_name.len * 2);
1034 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1034 ret = btrfs_update_inode(trans, parent_root, parent_inode); 1035 ret = btrfs_update_inode(trans, parent_root, parent_inode);
1035 if (ret) 1036 if (ret)
1036 goto abort_trans_dput; 1037 goto abort_trans_dput;
@@ -1066,7 +1067,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1066 memcpy(new_root_item->parent_uuid, root->root_item.uuid, 1067 memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1067 BTRFS_UUID_SIZE); 1068 BTRFS_UUID_SIZE);
1068 new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec); 1069 new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
1069 new_root_item->otime.nsec = cpu_to_le64(cur_time.tv_nsec); 1070 new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
1070 btrfs_set_root_otransid(new_root_item, trans->transid); 1071 btrfs_set_root_otransid(new_root_item, trans->transid);
1071 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime)); 1072 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1072 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime)); 1073 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index e86ae04abe6a..88b969aeeb71 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -227,9 +227,8 @@ loop_lock:
227 cur = pending; 227 cur = pending;
228 pending = pending->bi_next; 228 pending = pending->bi_next;
229 cur->bi_next = NULL; 229 cur->bi_next = NULL;
230 atomic_dec(&fs_info->nr_async_bios);
231 230
232 if (atomic_read(&fs_info->nr_async_bios) < limit && 231 if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
233 waitqueue_active(&fs_info->async_submit_wait)) 232 waitqueue_active(&fs_info->async_submit_wait))
234 wake_up(&fs_info->async_submit_wait); 233 wake_up(&fs_info->async_submit_wait);
235 234
@@ -569,9 +568,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
569 memcpy(new_device, device, sizeof(*new_device)); 568 memcpy(new_device, device, sizeof(*new_device));
570 569
571 /* Safe because we are under uuid_mutex */ 570 /* Safe because we are under uuid_mutex */
572 name = rcu_string_strdup(device->name->str, GFP_NOFS); 571 if (device->name) {
573 BUG_ON(device->name && !name); /* -ENOMEM */ 572 name = rcu_string_strdup(device->name->str, GFP_NOFS);
574 rcu_assign_pointer(new_device->name, name); 573 BUG_ON(device->name && !name); /* -ENOMEM */
574 rcu_assign_pointer(new_device->name, name);
575 }
575 new_device->bdev = NULL; 576 new_device->bdev = NULL;
576 new_device->writeable = 0; 577 new_device->writeable = 0;
577 new_device->in_fs_metadata = 0; 578 new_device->in_fs_metadata = 0;
@@ -4605,28 +4606,6 @@ int btrfs_read_sys_array(struct btrfs_root *root)
4605 return ret; 4606 return ret;
4606} 4607}
4607 4608
4608struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
4609 u64 logical, int mirror_num)
4610{
4611 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4612 int ret;
4613 u64 map_length = 0;
4614 struct btrfs_bio *bbio = NULL;
4615 struct btrfs_device *device;
4616
4617 BUG_ON(mirror_num == 0);
4618 ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
4619 mirror_num);
4620 if (ret) {
4621 BUG_ON(bbio != NULL);
4622 return NULL;
4623 }
4624 BUG_ON(mirror_num != bbio->mirror_num);
4625 device = bbio->stripes[mirror_num - 1].dev;
4626 kfree(bbio);
4627 return device;
4628}
4629
4630int btrfs_read_chunk_tree(struct btrfs_root *root) 4609int btrfs_read_chunk_tree(struct btrfs_root *root)
4631{ 4610{
4632 struct btrfs_path *path; 4611 struct btrfs_path *path;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 5479325987b3..53c06af92e8d 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -289,8 +289,6 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
289int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); 289int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
290int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, 290int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
291 u64 *start, u64 *max_avail); 291 u64 *start, u64 *max_avail);
292struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
293 u64 logical, int mirror_num);
294void btrfs_dev_stat_print_on_error(struct btrfs_device *device); 292void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
295void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index); 293void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
296int btrfs_get_dev_stats(struct btrfs_root *root, 294int btrfs_get_dev_stats(struct btrfs_root *root,
diff --git a/fs/buffer.c b/fs/buffer.c
index 9f6d2e41281d..58e2e7b77372 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -914,7 +914,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
914/* 914/*
915 * Initialise the state of a blockdev page's buffers. 915 * Initialise the state of a blockdev page's buffers.
916 */ 916 */
917static void 917static sector_t
918init_page_buffers(struct page *page, struct block_device *bdev, 918init_page_buffers(struct page *page, struct block_device *bdev,
919 sector_t block, int size) 919 sector_t block, int size)
920{ 920{
@@ -936,33 +936,41 @@ init_page_buffers(struct page *page, struct block_device *bdev,
936 block++; 936 block++;
937 bh = bh->b_this_page; 937 bh = bh->b_this_page;
938 } while (bh != head); 938 } while (bh != head);
939
940 /*
941 * Caller needs to validate requested block against end of device.
942 */
943 return end_block;
939} 944}
940 945
941/* 946/*
942 * Create the page-cache page that contains the requested block. 947 * Create the page-cache page that contains the requested block.
943 * 948 *
944 * This is user purely for blockdev mappings. 949 * This is used purely for blockdev mappings.
945 */ 950 */
946static struct page * 951static int
947grow_dev_page(struct block_device *bdev, sector_t block, 952grow_dev_page(struct block_device *bdev, sector_t block,
948 pgoff_t index, int size) 953 pgoff_t index, int size, int sizebits)
949{ 954{
950 struct inode *inode = bdev->bd_inode; 955 struct inode *inode = bdev->bd_inode;
951 struct page *page; 956 struct page *page;
952 struct buffer_head *bh; 957 struct buffer_head *bh;
958 sector_t end_block;
959 int ret = 0; /* Will call free_more_memory() */
953 960
954 page = find_or_create_page(inode->i_mapping, index, 961 page = find_or_create_page(inode->i_mapping, index,
955 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE); 962 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
956 if (!page) 963 if (!page)
957 return NULL; 964 return ret;
958 965
959 BUG_ON(!PageLocked(page)); 966 BUG_ON(!PageLocked(page));
960 967
961 if (page_has_buffers(page)) { 968 if (page_has_buffers(page)) {
962 bh = page_buffers(page); 969 bh = page_buffers(page);
963 if (bh->b_size == size) { 970 if (bh->b_size == size) {
964 init_page_buffers(page, bdev, block, size); 971 end_block = init_page_buffers(page, bdev,
965 return page; 972 index << sizebits, size);
973 goto done;
966 } 974 }
967 if (!try_to_free_buffers(page)) 975 if (!try_to_free_buffers(page))
968 goto failed; 976 goto failed;
@@ -982,14 +990,14 @@ grow_dev_page(struct block_device *bdev, sector_t block,
982 */ 990 */
983 spin_lock(&inode->i_mapping->private_lock); 991 spin_lock(&inode->i_mapping->private_lock);
984 link_dev_buffers(page, bh); 992 link_dev_buffers(page, bh);
985 init_page_buffers(page, bdev, block, size); 993 end_block = init_page_buffers(page, bdev, index << sizebits, size);
986 spin_unlock(&inode->i_mapping->private_lock); 994 spin_unlock(&inode->i_mapping->private_lock);
987 return page; 995done:
988 996 ret = (block < end_block) ? 1 : -ENXIO;
989failed: 997failed:
990 unlock_page(page); 998 unlock_page(page);
991 page_cache_release(page); 999 page_cache_release(page);
992 return NULL; 1000 return ret;
993} 1001}
994 1002
995/* 1003/*
@@ -999,7 +1007,6 @@ failed:
999static int 1007static int
1000grow_buffers(struct block_device *bdev, sector_t block, int size) 1008grow_buffers(struct block_device *bdev, sector_t block, int size)
1001{ 1009{
1002 struct page *page;
1003 pgoff_t index; 1010 pgoff_t index;
1004 int sizebits; 1011 int sizebits;
1005 1012
@@ -1023,22 +1030,14 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
1023 bdevname(bdev, b)); 1030 bdevname(bdev, b));
1024 return -EIO; 1031 return -EIO;
1025 } 1032 }
1026 block = index << sizebits; 1033
1027 /* Create a page with the proper size buffers.. */ 1034 /* Create a page with the proper size buffers.. */
1028 page = grow_dev_page(bdev, block, index, size); 1035 return grow_dev_page(bdev, block, index, size, sizebits);
1029 if (!page)
1030 return 0;
1031 unlock_page(page);
1032 page_cache_release(page);
1033 return 1;
1034} 1036}
1035 1037
1036static struct buffer_head * 1038static struct buffer_head *
1037__getblk_slow(struct block_device *bdev, sector_t block, int size) 1039__getblk_slow(struct block_device *bdev, sector_t block, int size)
1038{ 1040{
1039 int ret;
1040 struct buffer_head *bh;
1041
1042 /* Size must be multiple of hard sectorsize */ 1041 /* Size must be multiple of hard sectorsize */
1043 if (unlikely(size & (bdev_logical_block_size(bdev)-1) || 1042 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1044 (size < 512 || size > PAGE_SIZE))) { 1043 (size < 512 || size > PAGE_SIZE))) {
@@ -1051,21 +1050,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
1051 return NULL; 1050 return NULL;
1052 } 1051 }
1053 1052
1054retry: 1053 for (;;) {
1055 bh = __find_get_block(bdev, block, size); 1054 struct buffer_head *bh;
1056 if (bh) 1055 int ret;
1057 return bh;
1058 1056
1059 ret = grow_buffers(bdev, block, size);
1060 if (ret == 0) {
1061 free_more_memory();
1062 goto retry;
1063 } else if (ret > 0) {
1064 bh = __find_get_block(bdev, block, size); 1057 bh = __find_get_block(bdev, block, size);
1065 if (bh) 1058 if (bh)
1066 return bh; 1059 return bh;
1060
1061 ret = grow_buffers(bdev, block, size);
1062 if (ret < 0)
1063 return NULL;
1064 if (ret == 0)
1065 free_more_memory();
1067 } 1066 }
1068 return NULL;
1069} 1067}
1070 1068
1071/* 1069/*
@@ -1321,10 +1319,6 @@ EXPORT_SYMBOL(__find_get_block);
1321 * which corresponds to the passed block_device, block and size. The 1319 * which corresponds to the passed block_device, block and size. The
1322 * returned buffer has its reference count incremented. 1320 * returned buffer has its reference count incremented.
1323 * 1321 *
1324 * __getblk() cannot fail - it just keeps trying. If you pass it an
1325 * illegal block number, __getblk() will happily return a buffer_head
1326 * which represents the non-existent block. Very weird.
1327 *
1328 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers() 1322 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1329 * attempt is failing. FIXME, perhaps? 1323 * attempt is failing. FIXME, perhaps?
1330 */ 1324 */
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index fb962efdacee..6d59006bfa27 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -201,6 +201,7 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
201 int err = -ENOMEM; 201 int err = -ENOMEM;
202 202
203 dout("ceph_fs_debugfs_init\n"); 203 dout("ceph_fs_debugfs_init\n");
204 BUG_ON(!fsc->client->debugfs_dir);
204 fsc->debugfs_congestion_kb = 205 fsc->debugfs_congestion_kb =
205 debugfs_create_file("writeback_congestion_kb", 206 debugfs_create_file("writeback_congestion_kb",
206 0600, 207 0600,
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 9fff9f3b17e4..4b5762ef7c2b 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -992,11 +992,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
992 if (rinfo->head->is_dentry) { 992 if (rinfo->head->is_dentry) {
993 struct inode *dir = req->r_locked_dir; 993 struct inode *dir = req->r_locked_dir;
994 994
995 err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, 995 if (dir) {
996 session, req->r_request_started, -1, 996 err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
997 &req->r_caps_reservation); 997 session, req->r_request_started, -1,
998 if (err < 0) 998 &req->r_caps_reservation);
999 return err; 999 if (err < 0)
1000 return err;
1001 } else {
1002 WARN_ON_ONCE(1);
1003 }
1000 } 1004 }
1001 1005
1002 /* 1006 /*
@@ -1004,6 +1008,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1004 * will have trouble splicing in the virtual snapdir later 1008 * will have trouble splicing in the virtual snapdir later
1005 */ 1009 */
1006 if (rinfo->head->is_dentry && !req->r_aborted && 1010 if (rinfo->head->is_dentry && !req->r_aborted &&
1011 req->r_locked_dir &&
1007 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, 1012 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1008 fsc->mount_options->snapdir_name, 1013 fsc->mount_options->snapdir_name,
1009 req->r_dentry->d_name.len))) { 1014 req->r_dentry->d_name.len))) {
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 8e3fb69fbe62..1396ceb46797 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -42,7 +42,8 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
42 /* validate striping parameters */ 42 /* validate striping parameters */
43 if ((l->object_size & ~PAGE_MASK) || 43 if ((l->object_size & ~PAGE_MASK) ||
44 (l->stripe_unit & ~PAGE_MASK) || 44 (l->stripe_unit & ~PAGE_MASK) ||
45 ((unsigned)l->object_size % (unsigned)l->stripe_unit)) 45 (l->stripe_unit != 0 &&
46 ((unsigned)l->object_size % (unsigned)l->stripe_unit)))
46 return -EINVAL; 47 return -EINVAL;
47 48
48 /* make sure it's a valid data pool */ 49 /* make sure it's a valid data pool */
diff --git a/fs/compat.c b/fs/compat.c
index 6161255fac45..1bdb350ea5d3 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1155,11 +1155,14 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec,
1155 struct file *file; 1155 struct file *file;
1156 int fput_needed; 1156 int fput_needed;
1157 ssize_t ret; 1157 ssize_t ret;
1158 loff_t pos;
1158 1159
1159 file = fget_light(fd, &fput_needed); 1160 file = fget_light(fd, &fput_needed);
1160 if (!file) 1161 if (!file)
1161 return -EBADF; 1162 return -EBADF;
1162 ret = compat_readv(file, vec, vlen, &file->f_pos); 1163 pos = file->f_pos;
1164 ret = compat_readv(file, vec, vlen, &pos);
1165 file->f_pos = pos;
1163 fput_light(file, fput_needed); 1166 fput_light(file, fput_needed);
1164 return ret; 1167 return ret;
1165} 1168}
@@ -1221,11 +1224,14 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec,
1221 struct file *file; 1224 struct file *file;
1222 int fput_needed; 1225 int fput_needed;
1223 ssize_t ret; 1226 ssize_t ret;
1227 loff_t pos;
1224 1228
1225 file = fget_light(fd, &fput_needed); 1229 file = fget_light(fd, &fput_needed);
1226 if (!file) 1230 if (!file)
1227 return -EBADF; 1231 return -EBADF;
1228 ret = compat_writev(file, vec, vlen, &file->f_pos); 1232 pos = file->f_pos;
1233 ret = compat_writev(file, vec, vlen, &pos);
1234 file->f_pos = pos;
1229 fput_light(file, fput_needed); 1235 fput_light(file, fput_needed);
1230 return ret; 1236 return ret;
1231} 1237}
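
The compat_sys_readv()/compat_sys_writev() hunks above stop handing &file->f_pos straight into the IO path: the position is copied into a local loff_t, the vectored IO advances that copy, and the result is stored back with a single assignment, mirroring how the native readv/writev paths treat the file position. In outline (a sketch, not the actual compat code; vfs_readv() stands in for compat_readv() here):

#include <linux/fs.h>
#include <linux/uio.h>

/* sketch: run vectored IO against a private copy of the position */
static ssize_t demo_readv(struct file *file, const struct iovec __user *vec,
                          unsigned long vlen)
{
        loff_t pos = file->f_pos;       /* snapshot */
        ssize_t ret;

        ret = vfs_readv(file, vec, vlen, &pos); /* may advance pos */
        file->f_pos = pos;              /* single store back */
        return ret;
}
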
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1faf4cb56f39..f86c720dba0e 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1062,6 +1062,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1062 unsigned long user_addr; 1062 unsigned long user_addr;
1063 size_t bytes; 1063 size_t bytes;
1064 struct buffer_head map_bh = { 0, }; 1064 struct buffer_head map_bh = { 0, };
1065 struct blk_plug plug;
1065 1066
1066 if (rw & WRITE) 1067 if (rw & WRITE)
1067 rw = WRITE_ODIRECT; 1068 rw = WRITE_ODIRECT;
@@ -1177,6 +1178,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1177 PAGE_SIZE - user_addr / PAGE_SIZE); 1178 PAGE_SIZE - user_addr / PAGE_SIZE);
1178 } 1179 }
1179 1180
1181 blk_start_plug(&plug);
1182
1180 for (seg = 0; seg < nr_segs; seg++) { 1183 for (seg = 0; seg < nr_segs; seg++) {
1181 user_addr = (unsigned long)iov[seg].iov_base; 1184 user_addr = (unsigned long)iov[seg].iov_base;
1182 sdio.size += bytes = iov[seg].iov_len; 1185 sdio.size += bytes = iov[seg].iov_len;
@@ -1235,6 +1238,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1235 if (sdio.bio) 1238 if (sdio.bio)
1236 dio_bio_submit(dio, &sdio); 1239 dio_bio_submit(dio, &sdio);
1237 1240
1241 blk_finish_plug(&plug);
1242
1238 /* 1243 /*
1239 * It is possible that, we return short IO due to end of file. 1244 * It is possible that, we return short IO due to end of file.
1240 * In that case, we need to release all the pages we got hold on. 1245 * In that case, we need to release all the pages we got hold on.
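
The do_blockdev_direct_IO() change above brackets the per-segment submission loop with a block plug, so bios built for consecutive segments sit in the per-task plug list and can be merged before being dispatched when the plug is released. The shape of the pattern (an illustrative sketch; the real loop submits one bio per mapped extent):

#include <linux/blkdev.h>

static void demo_submit_batch(void)
{
        struct blk_plug plug;

        blk_start_plug(&plug);
        /* ... build and submit any number of bios here ... */
        blk_finish_plug(&plug); /* unplug: queued requests go to the driver */
}
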
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 1c8b55670804..eedec84c1809 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1654,8 +1654,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
1654 error = PTR_ERR(file); 1654 error = PTR_ERR(file);
1655 goto out_free_fd; 1655 goto out_free_fd;
1656 } 1656 }
1657 fd_install(fd, file);
1658 ep->file = file; 1657 ep->file = file;
1658 fd_install(fd, file);
1659 return fd; 1659 return fd;
1660 1660
1661out_free_fd: 1661out_free_fd:
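
The epoll_create1() reordering above matters because fd_install() is the publication point: once it runs, the descriptor is live and another thread of the same process can start acting on it, so every field the file depends on (here ep->file) has to be set first. Schematically (a sketch with a made-up context structure, not the eventpoll code):

#include <linux/file.h>

struct demo_ctx {                       /* hypothetical private state */
        struct file *file;
};

static int demo_publish(struct demo_ctx *ctx, struct file *file, int fd)
{
        ctx->file = file;       /* finish all initialisation ... */
        fd_install(fd, file);   /* ... before the fd becomes reachable */
        return fd;
}
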
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 09357508ec9a..a2862339323b 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1113,6 +1113,11 @@ static void mark_journal_empty(journal_t *journal)
1113 1113
1114 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); 1114 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
1115 spin_lock(&journal->j_state_lock); 1115 spin_lock(&journal->j_state_lock);
1116 /* Is it already empty? */
1117 if (sb->s_start == 0) {
1118 spin_unlock(&journal->j_state_lock);
1119 return;
1120 }
1116 jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n", 1121 jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",
1117 journal->j_tail_sequence); 1122 journal->j_tail_sequence);
1118 1123
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index df0de27c2733..e784a217b500 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -26,6 +26,7 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
26 struct completion complete; 26 struct completion complete;
27 27
28 bio_init(&bio); 28 bio_init(&bio);
29 bio.bi_max_vecs = 1;
29 bio.bi_io_vec = &bio_vec; 30 bio.bi_io_vec = &bio_vec;
30 bio_vec.bv_page = page; 31 bio_vec.bv_page = page;
31 bio_vec.bv_len = PAGE_SIZE; 32 bio_vec.bv_len = PAGE_SIZE;
@@ -95,12 +96,11 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
95 struct address_space *mapping = super->s_mapping_inode->i_mapping; 96 struct address_space *mapping = super->s_mapping_inode->i_mapping;
96 struct bio *bio; 97 struct bio *bio;
97 struct page *page; 98 struct page *page;
98 struct request_queue *q = bdev_get_queue(sb->s_bdev); 99 unsigned int max_pages;
99 unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
100 int i; 100 int i;
101 101
102 if (max_pages > BIO_MAX_PAGES) 102 max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
103 max_pages = BIO_MAX_PAGES; 103
104 bio = bio_alloc(GFP_NOFS, max_pages); 104 bio = bio_alloc(GFP_NOFS, max_pages);
105 BUG_ON(!bio); 105 BUG_ON(!bio);
106 106
@@ -190,12 +190,11 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
190{ 190{
191 struct logfs_super *super = logfs_super(sb); 191 struct logfs_super *super = logfs_super(sb);
192 struct bio *bio; 192 struct bio *bio;
193 struct request_queue *q = bdev_get_queue(sb->s_bdev); 193 unsigned int max_pages;
194 unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
195 int i; 194 int i;
196 195
197 if (max_pages > BIO_MAX_PAGES) 196 max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
198 max_pages = BIO_MAX_PAGES; 197
199 bio = bio_alloc(GFP_NOFS, max_pages); 198 bio = bio_alloc(GFP_NOFS, max_pages);
200 BUG_ON(!bio); 199 BUG_ON(!bio);
201 200
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index a422f42238b2..6984562738d3 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -156,10 +156,26 @@ static void __logfs_destroy_inode(struct inode *inode)
156 call_rcu(&inode->i_rcu, logfs_i_callback); 156 call_rcu(&inode->i_rcu, logfs_i_callback);
157} 157}
158 158
159static void __logfs_destroy_meta_inode(struct inode *inode)
160{
161 struct logfs_inode *li = logfs_inode(inode);
162 BUG_ON(li->li_block);
163 call_rcu(&inode->i_rcu, logfs_i_callback);
164}
165
159static void logfs_destroy_inode(struct inode *inode) 166static void logfs_destroy_inode(struct inode *inode)
160{ 167{
161 struct logfs_inode *li = logfs_inode(inode); 168 struct logfs_inode *li = logfs_inode(inode);
162 169
170 if (inode->i_ino < LOGFS_RESERVED_INOS) {
171 /*
172 * The reserved inodes are never destroyed unless we are in
173 * unmont path.
174 */
175 __logfs_destroy_meta_inode(inode);
176 return;
177 }
178
163 BUG_ON(list_empty(&li->li_freeing_list)); 179 BUG_ON(list_empty(&li->li_freeing_list));
164 spin_lock(&logfs_inode_lock); 180 spin_lock(&logfs_inode_lock);
165 li->li_refcount--; 181 li->li_refcount--;
@@ -373,8 +389,8 @@ static void logfs_put_super(struct super_block *sb)
373{ 389{
374 struct logfs_super *super = logfs_super(sb); 390 struct logfs_super *super = logfs_super(sb);
375 /* kill the meta-inodes */ 391 /* kill the meta-inodes */
376 iput(super->s_master_inode);
377 iput(super->s_segfile_inode); 392 iput(super->s_segfile_inode);
393 iput(super->s_master_inode);
378 iput(super->s_mapping_inode); 394 iput(super->s_mapping_inode);
379} 395}
380 396
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index 1e1c369df22b..2a09b8d73989 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -565,7 +565,7 @@ static void write_wbuf(struct super_block *sb, struct logfs_area *area,
565 index = ofs >> PAGE_SHIFT; 565 index = ofs >> PAGE_SHIFT;
566 page_ofs = ofs & (PAGE_SIZE - 1); 566 page_ofs = ofs & (PAGE_SIZE - 1);
567 567
568 page = find_lock_page(mapping, index); 568 page = find_or_create_page(mapping, index, GFP_NOFS);
569 BUG_ON(!page); 569 BUG_ON(!page);
570 memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize); 570 memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize);
571 unlock_page(page); 571 unlock_page(page);
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index f1cb512c5019..5be0abef603d 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -2189,7 +2189,6 @@ void logfs_evict_inode(struct inode *inode)
2189 return; 2189 return;
2190 } 2190 }
2191 2191
2192 BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS);
2193 page = inode_to_page(inode); 2192 page = inode_to_page(inode);
2194 BUG_ON(!page); /* FIXME: Use emergency page */ 2193 BUG_ON(!page); /* FIXME: Use emergency page */
2195 logfs_put_write_page(page); 2194 logfs_put_write_page(page);
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index e28d090c98d6..038da0991794 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -886,7 +886,7 @@ static struct logfs_area *alloc_area(struct super_block *sb)
886 886
887static void map_invalidatepage(struct page *page, unsigned long l) 887static void map_invalidatepage(struct page *page, unsigned long l)
888{ 888{
889 BUG(); 889 return;
890} 890}
891 891
892static int map_releasepage(struct page *page, gfp_t g) 892static int map_releasepage(struct page *page, gfp_t g)
diff --git a/fs/namei.c b/fs/namei.c
index db76b866a097..dd1ed1b8e98e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -352,6 +352,7 @@ int __inode_permission(struct inode *inode, int mask)
352/** 352/**
353 * sb_permission - Check superblock-level permissions 353 * sb_permission - Check superblock-level permissions
354 * @sb: Superblock of inode to check permission on 354 * @sb: Superblock of inode to check permission on
355 * @inode: Inode to check permission on
355 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) 356 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
356 * 357 *
357 * Separate out file-system wide checks from inode-specific permission checks. 358 * Separate out file-system wide checks from inode-specific permission checks.
@@ -656,6 +657,7 @@ int sysctl_protected_hardlinks __read_mostly = 1;
656/** 657/**
657 * may_follow_link - Check symlink following for unsafe situations 658 * may_follow_link - Check symlink following for unsafe situations
658 * @link: The path of the symlink 659 * @link: The path of the symlink
660 * @nd: nameidata pathwalk data
659 * 661 *
660 * In the case of the sysctl_protected_symlinks sysctl being enabled, 662 * In the case of the sysctl_protected_symlinks sysctl being enabled,
661 * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is 663 * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 8bf3a3f6925a..b7db60897f91 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -12,19 +12,19 @@ nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
12nfs-$(CONFIG_SYSCTL) += sysctl.o 12nfs-$(CONFIG_SYSCTL) += sysctl.o
13nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o 13nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
14 14
15obj-$(CONFIG_NFS_V2) += nfs2.o 15obj-$(CONFIG_NFS_V2) += nfsv2.o
16nfs2-y := nfs2super.o proc.o nfs2xdr.o 16nfsv2-y := nfs2super.o proc.o nfs2xdr.o
17 17
18obj-$(CONFIG_NFS_V3) += nfs3.o 18obj-$(CONFIG_NFS_V3) += nfsv3.o
19nfs3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o 19nfsv3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o
20nfs3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o 20nfsv3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
21 21
22obj-$(CONFIG_NFS_V4) += nfs4.o 22obj-$(CONFIG_NFS_V4) += nfsv4.o
23nfs4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \ 23nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \
24 delegation.o idmap.o callback.o callback_xdr.o callback_proc.o \ 24 delegation.o idmap.o callback.o callback_xdr.o callback_proc.o \
25 nfs4namespace.o nfs4getroot.o nfs4client.o 25 nfs4namespace.o nfs4getroot.o nfs4client.o
26nfs4-$(CONFIG_SYSCTL) += nfs4sysctl.o 26nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o
27nfs4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o 27nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o
28 28
29obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o 29obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o
30nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o 30nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 9fc0d9dfc91b..99694442b93f 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -105,7 +105,7 @@ struct nfs_subversion *get_nfs_version(unsigned int version)
105 105
106 if (IS_ERR(nfs)) { 106 if (IS_ERR(nfs)) {
107 mutex_lock(&nfs_version_mutex); 107 mutex_lock(&nfs_version_mutex);
108 request_module("nfs%d", version); 108 request_module("nfsv%d", version);
109 nfs = find_nfs_version(version); 109 nfs = find_nfs_version(version);
110 mutex_unlock(&nfs_version_mutex); 110 mutex_unlock(&nfs_version_mutex);
111 } 111 }
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index b701358c39c3..a850079467d8 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -61,6 +61,12 @@ struct idmap {
61 struct mutex idmap_mutex; 61 struct mutex idmap_mutex;
62}; 62};
63 63
64struct idmap_legacy_upcalldata {
65 struct rpc_pipe_msg pipe_msg;
66 struct idmap_msg idmap_msg;
67 struct idmap *idmap;
68};
69
64/** 70/**
65 * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields 71 * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
66 * @fattr: fully initialised struct nfs_fattr 72 * @fattr: fully initialised struct nfs_fattr
@@ -324,6 +330,7 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
324 ret = nfs_idmap_request_key(&key_type_id_resolver_legacy, 330 ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
325 name, namelen, type, data, 331 name, namelen, type, data,
326 data_size, idmap); 332 data_size, idmap);
333 idmap->idmap_key_cons = NULL;
327 mutex_unlock(&idmap->idmap_mutex); 334 mutex_unlock(&idmap->idmap_mutex);
328 } 335 }
329 return ret; 336 return ret;
@@ -380,11 +387,13 @@ static const match_table_t nfs_idmap_tokens = {
380static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *); 387static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);
381static ssize_t idmap_pipe_downcall(struct file *, const char __user *, 388static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
382 size_t); 389 size_t);
390static void idmap_release_pipe(struct inode *);
383static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *); 391static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
384 392
385static const struct rpc_pipe_ops idmap_upcall_ops = { 393static const struct rpc_pipe_ops idmap_upcall_ops = {
386 .upcall = rpc_pipe_generic_upcall, 394 .upcall = rpc_pipe_generic_upcall,
387 .downcall = idmap_pipe_downcall, 395 .downcall = idmap_pipe_downcall,
396 .release_pipe = idmap_release_pipe,
388 .destroy_msg = idmap_pipe_destroy_msg, 397 .destroy_msg = idmap_pipe_destroy_msg,
389}; 398};
390 399
@@ -616,7 +625,8 @@ void nfs_idmap_quit(void)
616 nfs_idmap_quit_keyring(); 625 nfs_idmap_quit_keyring();
617} 626}
618 627
619static int nfs_idmap_prepare_message(char *desc, struct idmap_msg *im, 628static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap,
629 struct idmap_msg *im,
620 struct rpc_pipe_msg *msg) 630 struct rpc_pipe_msg *msg)
621{ 631{
622 substring_t substr; 632 substring_t substr;
@@ -659,6 +669,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
659 const char *op, 669 const char *op,
660 void *aux) 670 void *aux)
661{ 671{
672 struct idmap_legacy_upcalldata *data;
662 struct rpc_pipe_msg *msg; 673 struct rpc_pipe_msg *msg;
663 struct idmap_msg *im; 674 struct idmap_msg *im;
664 struct idmap *idmap = (struct idmap *)aux; 675 struct idmap *idmap = (struct idmap *)aux;
@@ -666,15 +677,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
666 int ret = -ENOMEM; 677 int ret = -ENOMEM;
667 678
668 /* msg and im are freed in idmap_pipe_destroy_msg */ 679 /* msg and im are freed in idmap_pipe_destroy_msg */
669 msg = kmalloc(sizeof(*msg), GFP_KERNEL); 680 data = kmalloc(sizeof(*data), GFP_KERNEL);
670 if (!msg) 681 if (!data)
671 goto out0;
672
673 im = kmalloc(sizeof(*im), GFP_KERNEL);
674 if (!im)
675 goto out1; 682 goto out1;
676 683
677 ret = nfs_idmap_prepare_message(key->description, im, msg); 684 msg = &data->pipe_msg;
685 im = &data->idmap_msg;
686 data->idmap = idmap;
687
688 ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
678 if (ret < 0) 689 if (ret < 0)
679 goto out2; 690 goto out2;
680 691
@@ -683,15 +694,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
683 694
684 ret = rpc_queue_upcall(idmap->idmap_pipe, msg); 695 ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
685 if (ret < 0) 696 if (ret < 0)
686 goto out2; 697 goto out3;
687 698
688 return ret; 699 return ret;
689 700
701out3:
702 idmap->idmap_key_cons = NULL;
690out2: 703out2:
691 kfree(im); 704 kfree(data);
692out1: 705out1:
693 kfree(msg);
694out0:
695 complete_request_key(cons, ret); 706 complete_request_key(cons, ret);
696 return ret; 707 return ret;
697} 708}
@@ -749,9 +760,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
749 } 760 }
750 761
751 if (!(im.im_status & IDMAP_STATUS_SUCCESS)) { 762 if (!(im.im_status & IDMAP_STATUS_SUCCESS)) {
752 ret = mlen; 763 ret = -ENOKEY;
753 complete_request_key(cons, -ENOKEY); 764 goto out;
754 goto out_incomplete;
755 } 765 }
756 766
757 namelen_in = strnlen(im.im_name, IDMAP_NAMESZ); 767 namelen_in = strnlen(im.im_name, IDMAP_NAMESZ);
@@ -768,16 +778,32 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
768 778
769out: 779out:
770 complete_request_key(cons, ret); 780 complete_request_key(cons, ret);
771out_incomplete:
772 return ret; 781 return ret;
773} 782}
774 783
775static void 784static void
776idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg) 785idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
777{ 786{
787 struct idmap_legacy_upcalldata *data = container_of(msg,
788 struct idmap_legacy_upcalldata,
789 pipe_msg);
790 struct idmap *idmap = data->idmap;
791 struct key_construction *cons;
792 if (msg->errno) {
793 cons = ACCESS_ONCE(idmap->idmap_key_cons);
794 idmap->idmap_key_cons = NULL;
795 complete_request_key(cons, msg->errno);
796 }
778 /* Free memory allocated in nfs_idmap_legacy_upcall() */ 797 /* Free memory allocated in nfs_idmap_legacy_upcall() */
779 kfree(msg->data); 798 kfree(data);
780 kfree(msg); 799}
800
801static void
802idmap_release_pipe(struct inode *inode)
803{
804 struct rpc_inode *rpci = RPC_I(inode);
805 struct idmap *idmap = (struct idmap *)rpci->private;
806 idmap->idmap_key_cons = NULL;
781} 807}
782 808
783int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid) 809int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid)
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 0952c791df36..d6b3b5f2d779 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -69,7 +69,7 @@ do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
69 nfs_fattr_init(info->fattr); 69 nfs_fattr_init(info->fattr);
70 status = rpc_call_sync(client, &msg, 0); 70 status = rpc_call_sync(client, &msg, 0);
71 dprintk("%s: reply fsinfo: %d\n", __func__, status); 71 dprintk("%s: reply fsinfo: %d\n", __func__, status);
72 if (!(info->fattr->valid & NFS_ATTR_FATTR)) { 72 if (status == 0 && !(info->fattr->valid & NFS_ATTR_FATTR)) {
73 msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR]; 73 msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
74 msg.rpc_resp = info->fattr; 74 msg.rpc_resp = info->fattr;
75 status = rpc_call_sync(client, &msg, 0); 75 status = rpc_call_sync(client, &msg, 0);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 3b950dd81e81..da0618aeeadb 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -205,6 +205,9 @@ extern const struct dentry_operations nfs4_dentry_operations;
205int nfs_atomic_open(struct inode *, struct dentry *, struct file *, 205int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
206 unsigned, umode_t, int *); 206 unsigned, umode_t, int *);
207 207
208/* super.c */
209extern struct file_system_type nfs4_fs_type;
210
208/* nfs4namespace.c */ 211/* nfs4namespace.c */
209rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *); 212rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
210struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *); 213struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index cbcdfaf32505..24eb663f8ed5 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -74,7 +74,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
74 return clp; 74 return clp;
75 75
76error: 76error:
77 kfree(clp); 77 nfs_free_client(clp);
78 return ERR_PTR(err); 78 return ERR_PTR(err);
79} 79}
80 80
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a99a8d948721..635274140b18 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3737,9 +3737,10 @@ out:
3737static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 3737static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
3738{ 3738{
3739 struct nfs4_cached_acl *acl; 3739 struct nfs4_cached_acl *acl;
3740 size_t buflen = sizeof(*acl) + acl_len;
3740 3741
3741 if (pages && acl_len <= PAGE_SIZE) { 3742 if (pages && buflen <= PAGE_SIZE) {
3742 acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL); 3743 acl = kmalloc(buflen, GFP_KERNEL);
3743 if (acl == NULL) 3744 if (acl == NULL)
3744 goto out; 3745 goto out;
3745 acl->cached = 1; 3746 acl->cached = 1;
@@ -3819,7 +3820,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
3819 if (ret) 3820 if (ret)
3820 goto out_free; 3821 goto out_free;
3821 3822
3822 acl_len = res.acl_len - res.acl_data_offset; 3823 acl_len = res.acl_len;
3823 if (acl_len > args.acl_len) 3824 if (acl_len > args.acl_len)
3824 nfs4_write_cached_acl(inode, NULL, 0, acl_len); 3825 nfs4_write_cached_acl(inode, NULL, 0, acl_len);
3825 else 3826 else
@@ -6223,11 +6224,58 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
6223 dprintk("<-- %s\n", __func__); 6224 dprintk("<-- %s\n", __func__);
6224} 6225}
6225 6226
6227static size_t max_response_pages(struct nfs_server *server)
6228{
6229 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
6230 return nfs_page_array_len(0, max_resp_sz);
6231}
6232
6233static void nfs4_free_pages(struct page **pages, size_t size)
6234{
6235 int i;
6236
6237 if (!pages)
6238 return;
6239
6240 for (i = 0; i < size; i++) {
6241 if (!pages[i])
6242 break;
6243 __free_page(pages[i]);
6244 }
6245 kfree(pages);
6246}
6247
6248static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
6249{
6250 struct page **pages;
6251 int i;
6252
6253 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
6254 if (!pages) {
6255 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
6256 return NULL;
6257 }
6258
6259 for (i = 0; i < size; i++) {
6260 pages[i] = alloc_page(gfp_flags);
6261 if (!pages[i]) {
6262 dprintk("%s: failed to allocate page\n", __func__);
6263 nfs4_free_pages(pages, size);
6264 return NULL;
6265 }
6266 }
6267
6268 return pages;
6269}
6270
6226static void nfs4_layoutget_release(void *calldata) 6271static void nfs4_layoutget_release(void *calldata)
6227{ 6272{
6228 struct nfs4_layoutget *lgp = calldata; 6273 struct nfs4_layoutget *lgp = calldata;
6274 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6275 size_t max_pages = max_response_pages(server);
6229 6276
6230 dprintk("--> %s\n", __func__); 6277 dprintk("--> %s\n", __func__);
6278 nfs4_free_pages(lgp->args.layout.pages, max_pages);
6231 put_nfs_open_context(lgp->args.ctx); 6279 put_nfs_open_context(lgp->args.ctx);
6232 kfree(calldata); 6280 kfree(calldata);
6233 dprintk("<-- %s\n", __func__); 6281 dprintk("<-- %s\n", __func__);
@@ -6239,9 +6287,10 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
6239 .rpc_release = nfs4_layoutget_release, 6287 .rpc_release = nfs4_layoutget_release,
6240}; 6288};
6241 6289
6242int nfs4_proc_layoutget(struct nfs4_layoutget *lgp) 6290void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
6243{ 6291{
6244 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 6292 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6293 size_t max_pages = max_response_pages(server);
6245 struct rpc_task *task; 6294 struct rpc_task *task;
6246 struct rpc_message msg = { 6295 struct rpc_message msg = {
6247 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 6296 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
@@ -6259,12 +6308,19 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
6259 6308
6260 dprintk("--> %s\n", __func__); 6309 dprintk("--> %s\n", __func__);
6261 6310
6311 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
6312 if (!lgp->args.layout.pages) {
6313 nfs4_layoutget_release(lgp);
6314 return;
6315 }
6316 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
6317
6262 lgp->res.layoutp = &lgp->args.layout; 6318 lgp->res.layoutp = &lgp->args.layout;
6263 lgp->res.seq_res.sr_slot = NULL; 6319 lgp->res.seq_res.sr_slot = NULL;
6264 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); 6320 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
6265 task = rpc_run_task(&task_setup_data); 6321 task = rpc_run_task(&task_setup_data);
6266 if (IS_ERR(task)) 6322 if (IS_ERR(task))
6267 return PTR_ERR(task); 6323 return;
6268 status = nfs4_wait_for_completion_rpc_task(task); 6324 status = nfs4_wait_for_completion_rpc_task(task);
6269 if (status == 0) 6325 if (status == 0)
6270 status = task->tk_status; 6326 status = task->tk_status;
@@ -6272,7 +6328,7 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
6272 status = pnfs_layout_process(lgp); 6328 status = pnfs_layout_process(lgp);
6273 rpc_put_task(task); 6329 rpc_put_task(task);
6274 dprintk("<-- %s status=%d\n", __func__, status); 6330 dprintk("<-- %s status=%d\n", __func__, status);
6275 return status; 6331 return;
6276} 6332}
6277 6333
6278static void 6334static void
@@ -6304,12 +6360,8 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
6304 return; 6360 return;
6305 } 6361 }
6306 spin_lock(&lo->plh_inode->i_lock); 6362 spin_lock(&lo->plh_inode->i_lock);
6307 if (task->tk_status == 0) { 6363 if (task->tk_status == 0 && lrp->res.lrs_present)
6308 if (lrp->res.lrs_present) { 6364 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
6309 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
6310 } else
6311 BUG_ON(!list_empty(&lo->plh_segs));
6312 }
6313 lo->plh_block_lgets--; 6365 lo->plh_block_lgets--;
6314 spin_unlock(&lo->plh_inode->i_lock); 6366 spin_unlock(&lo->plh_inode->i_lock);
6315 dprintk("<-- %s\n", __func__); 6367 dprintk("<-- %s\n", __func__);
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 12a31a9dbcdd..bd61221ad2c5 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -23,14 +23,6 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
23static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type, 23static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type,
24 int flags, const char *dev_name, void *raw_data); 24 int flags, const char *dev_name, void *raw_data);
25 25
26static struct file_system_type nfs4_fs_type = {
27 .owner = THIS_MODULE,
28 .name = "nfs4",
29 .mount = nfs_fs_mount,
30 .kill_sb = nfs_kill_super,
31 .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
32};
33
34static struct file_system_type nfs4_remote_fs_type = { 26static struct file_system_type nfs4_remote_fs_type = {
35 .owner = THIS_MODULE, 27 .owner = THIS_MODULE,
36 .name = "nfs4", 28 .name = "nfs4",
@@ -344,14 +336,8 @@ static int __init init_nfs_v4(void)
344 if (err) 336 if (err)
345 goto out1; 337 goto out1;
346 338
347 err = register_filesystem(&nfs4_fs_type);
348 if (err < 0)
349 goto out2;
350
351 register_nfs_version(&nfs_v4); 339 register_nfs_version(&nfs_v4);
352 return 0; 340 return 0;
353out2:
354 nfs4_unregister_sysctl();
355out1: 341out1:
356 nfs_idmap_quit(); 342 nfs_idmap_quit();
357out: 343out:
@@ -361,7 +347,6 @@ out:
361static void __exit exit_nfs_v4(void) 347static void __exit exit_nfs_v4(void)
362{ 348{
363 unregister_nfs_version(&nfs_v4); 349 unregister_nfs_version(&nfs_v4);
364 unregister_filesystem(&nfs4_fs_type);
365 nfs4_unregister_sysctl(); 350 nfs4_unregister_sysctl();
366 nfs_idmap_quit(); 351 nfs_idmap_quit();
367} 352}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index ca13483edd60..1bfbd67c556d 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -5045,22 +5045,19 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
5045 struct nfs_getaclres *res) 5045 struct nfs_getaclres *res)
5046{ 5046{
5047 unsigned int savep; 5047 unsigned int savep;
5048 __be32 *bm_p;
5049 uint32_t attrlen, 5048 uint32_t attrlen,
5050 bitmap[3] = {0}; 5049 bitmap[3] = {0};
5051 int status; 5050 int status;
5052 size_t page_len = xdr->buf->page_len; 5051 unsigned int pg_offset;
5053 5052
5054 res->acl_len = 0; 5053 res->acl_len = 0;
5055 if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) 5054 if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
5056 goto out; 5055 goto out;
5057 5056
5058 bm_p = xdr->p; 5057 xdr_enter_page(xdr, xdr->buf->page_len);
5059 res->acl_data_offset = be32_to_cpup(bm_p) + 2; 5058
5060 res->acl_data_offset <<= 2; 5059 /* Calculate the offset of the page data */
5061 /* Check if the acl data starts beyond the allocated buffer */ 5060 pg_offset = xdr->buf->head[0].iov_len;
5062 if (res->acl_data_offset > page_len)
5063 return -ERANGE;
5064 5061
5065 if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) 5062 if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
5066 goto out; 5063 goto out;
@@ -5074,23 +5071,20 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
5074 /* The bitmap (xdr len + bitmaps) and the attr xdr len words 5071 /* The bitmap (xdr len + bitmaps) and the attr xdr len words
5075 * are stored with the acl data to handle the problem of 5072 * are stored with the acl data to handle the problem of
5076 * variable length bitmaps.*/ 5073 * variable length bitmaps.*/
5077 xdr->p = bm_p; 5074 res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset;
5078 5075
5079 /* We ignore &savep and don't do consistency checks on 5076 /* We ignore &savep and don't do consistency checks on
5080 * the attr length. Let userspace figure it out.... */ 5077 * the attr length. Let userspace figure it out.... */
5081 attrlen += res->acl_data_offset; 5078 res->acl_len = attrlen;
5082 if (attrlen > page_len) { 5079 if (attrlen > (xdr->nwords << 2)) {
5083 if (res->acl_flags & NFS4_ACL_LEN_REQUEST) { 5080 if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
5084 /* getxattr interface called with a NULL buf */ 5081 /* getxattr interface called with a NULL buf */
5085 res->acl_len = attrlen;
5086 goto out; 5082 goto out;
5087 } 5083 }
5088 dprintk("NFS: acl reply: attrlen %u > page_len %zu\n", 5084 dprintk("NFS: acl reply: attrlen %u > page_len %u\n",
5089 attrlen, page_len); 5085 attrlen, xdr->nwords << 2);
5090 return -EINVAL; 5086 return -EINVAL;
5091 } 5087 }
5092 xdr_read_pages(xdr, attrlen);
5093 res->acl_len = attrlen;
5094 } else 5088 } else
5095 status = -EOPNOTSUPP; 5089 status = -EOPNOTSUPP;
5096 5090
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index f50d3e8d6f22..ea6d111b03e9 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -570,17 +570,66 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
570 return false; 570 return false;
571 571
572 return pgio->pg_count + req->wb_bytes <= 572 return pgio->pg_count + req->wb_bytes <=
573 OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length; 573 (unsigned long)pgio->pg_layout_private;
574}
575
576void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
577{
578 pnfs_generic_pg_init_read(pgio, req);
579 if (unlikely(pgio->pg_lseg == NULL))
580 return; /* Not pNFS */
581
582 pgio->pg_layout_private = (void *)
583 OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
584}
585
586static bool aligned_on_raid_stripe(u64 offset, struct ore_layout *layout,
587 unsigned long *stripe_end)
588{
589 u32 stripe_off;
590 unsigned stripe_size;
591
592 if (layout->raid_algorithm == PNFS_OSD_RAID_0)
593 return true;
594
595 stripe_size = layout->stripe_unit *
596 (layout->group_width - layout->parity);
597
598 div_u64_rem(offset, stripe_size, &stripe_off);
599 if (!stripe_off)
600 return true;
601
602 *stripe_end = stripe_size - stripe_off;
603 return false;
604}
605
606void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
607{
608 unsigned long stripe_end = 0;
609
610 pnfs_generic_pg_init_write(pgio, req);
611 if (unlikely(pgio->pg_lseg == NULL))
612 return; /* Not pNFS */
613
614 if (req->wb_offset ||
615 !aligned_on_raid_stripe(req->wb_index * PAGE_SIZE,
616 &OBJIO_LSEG(pgio->pg_lseg)->layout,
617 &stripe_end)) {
618 pgio->pg_layout_private = (void *)stripe_end;
619 } else {
620 pgio->pg_layout_private = (void *)
621 OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
622 }
574} 623}
575 624
576static const struct nfs_pageio_ops objio_pg_read_ops = { 625static const struct nfs_pageio_ops objio_pg_read_ops = {
577 .pg_init = pnfs_generic_pg_init_read, 626 .pg_init = objio_init_read,
578 .pg_test = objio_pg_test, 627 .pg_test = objio_pg_test,
579 .pg_doio = pnfs_generic_pg_readpages, 628 .pg_doio = pnfs_generic_pg_readpages,
580}; 629};
581 630
582static const struct nfs_pageio_ops objio_pg_write_ops = { 631static const struct nfs_pageio_ops objio_pg_write_ops = {
583 .pg_init = pnfs_generic_pg_init_write, 632 .pg_init = objio_init_write,
584 .pg_test = objio_pg_test, 633 .pg_test = objio_pg_test,
585 .pg_doio = pnfs_generic_pg_writepages, 634 .pg_doio = pnfs_generic_pg_writepages,
586}; 635};
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 1a6732ed04a4..311a79681e2b 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -49,6 +49,7 @@ void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
49 hdr->io_start = req_offset(hdr->req); 49 hdr->io_start = req_offset(hdr->req);
50 hdr->good_bytes = desc->pg_count; 50 hdr->good_bytes = desc->pg_count;
51 hdr->dreq = desc->pg_dreq; 51 hdr->dreq = desc->pg_dreq;
52 hdr->layout_private = desc->pg_layout_private;
52 hdr->release = release; 53 hdr->release = release;
53 hdr->completion_ops = desc->pg_completion_ops; 54 hdr->completion_ops = desc->pg_completion_ops;
54 if (hdr->completion_ops->init_hdr) 55 if (hdr->completion_ops->init_hdr)
@@ -268,6 +269,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
268 desc->pg_error = 0; 269 desc->pg_error = 0;
269 desc->pg_lseg = NULL; 270 desc->pg_lseg = NULL;
270 desc->pg_dreq = NULL; 271 desc->pg_dreq = NULL;
272 desc->pg_layout_private = NULL;
271} 273}
272EXPORT_SYMBOL_GPL(nfs_pageio_init); 274EXPORT_SYMBOL_GPL(nfs_pageio_init);
273 275
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 76875bfcf19c..2e00feacd4be 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -583,9 +583,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
583 struct nfs_server *server = NFS_SERVER(ino); 583 struct nfs_server *server = NFS_SERVER(ino);
584 struct nfs4_layoutget *lgp; 584 struct nfs4_layoutget *lgp;
585 struct pnfs_layout_segment *lseg = NULL; 585 struct pnfs_layout_segment *lseg = NULL;
586 struct page **pages = NULL;
587 int i;
588 u32 max_resp_sz, max_pages;
589 586
590 dprintk("--> %s\n", __func__); 587 dprintk("--> %s\n", __func__);
591 588
@@ -594,20 +591,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
594 if (lgp == NULL) 591 if (lgp == NULL)
595 return NULL; 592 return NULL;
596 593
597 /* allocate pages for xdr post processing */
598 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
599 max_pages = nfs_page_array_len(0, max_resp_sz);
600
601 pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
602 if (!pages)
603 goto out_err_free;
604
605 for (i = 0; i < max_pages; i++) {
606 pages[i] = alloc_page(gfp_flags);
607 if (!pages[i])
608 goto out_err_free;
609 }
610
611 lgp->args.minlength = PAGE_CACHE_SIZE; 594 lgp->args.minlength = PAGE_CACHE_SIZE;
612 if (lgp->args.minlength > range->length) 595 if (lgp->args.minlength > range->length)
613 lgp->args.minlength = range->length; 596 lgp->args.minlength = range->length;
@@ -616,39 +599,19 @@ send_layoutget(struct pnfs_layout_hdr *lo,
616 lgp->args.type = server->pnfs_curr_ld->id; 599 lgp->args.type = server->pnfs_curr_ld->id;
617 lgp->args.inode = ino; 600 lgp->args.inode = ino;
618 lgp->args.ctx = get_nfs_open_context(ctx); 601 lgp->args.ctx = get_nfs_open_context(ctx);
619 lgp->args.layout.pages = pages;
620 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
621 lgp->lsegpp = &lseg; 602 lgp->lsegpp = &lseg;
622 lgp->gfp_flags = gfp_flags; 603 lgp->gfp_flags = gfp_flags;
623 604
624 /* Synchronously retrieve layout information from server and 605 /* Synchronously retrieve layout information from server and
625 * store in lseg. 606 * store in lseg.
626 */ 607 */
627 nfs4_proc_layoutget(lgp); 608 nfs4_proc_layoutget(lgp, gfp_flags);
628 if (!lseg) { 609 if (!lseg) {
629 /* remember that LAYOUTGET failed and suspend trying */ 610 /* remember that LAYOUTGET failed and suspend trying */
630 set_bit(lo_fail_bit(range->iomode), &lo->plh_flags); 611 set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
631 } 612 }
632 613
633 /* free xdr pages */
634 for (i = 0; i < max_pages; i++)
635 __free_page(pages[i]);
636 kfree(pages);
637
638 return lseg; 614 return lseg;
639
640out_err_free:
641 /* free any allocated xdr pages, lgp as it's not used */
642 if (pages) {
643 for (i = 0; i < max_pages; i++) {
644 if (!pages[i])
645 break;
646 __free_page(pages[i]);
647 }
648 kfree(pages);
649 }
650 kfree(lgp);
651 return NULL;
652} 615}
653 616
654/* 617/*
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 2c6c80503ba4..745aa1b39e7c 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -172,7 +172,7 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server,
172 struct pnfs_devicelist *devlist); 172 struct pnfs_devicelist *devlist);
173extern int nfs4_proc_getdeviceinfo(struct nfs_server *server, 173extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
174 struct pnfs_device *dev); 174 struct pnfs_device *dev);
175extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp); 175extern void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
176extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp); 176extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
177 177
178/* pnfs.c */ 178/* pnfs.c */
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ac6a3c55dce4..239aff7338eb 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -319,6 +319,34 @@ EXPORT_SYMBOL_GPL(nfs_sops);
319static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *); 319static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);
320static int nfs4_validate_mount_data(void *options, 320static int nfs4_validate_mount_data(void *options,
321 struct nfs_parsed_mount_data *args, const char *dev_name); 321 struct nfs_parsed_mount_data *args, const char *dev_name);
322
323struct file_system_type nfs4_fs_type = {
324 .owner = THIS_MODULE,
325 .name = "nfs4",
326 .mount = nfs_fs_mount,
327 .kill_sb = nfs_kill_super,
328 .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
329};
330EXPORT_SYMBOL_GPL(nfs4_fs_type);
331
332static int __init register_nfs4_fs(void)
333{
334 return register_filesystem(&nfs4_fs_type);
335}
336
337static void unregister_nfs4_fs(void)
338{
339 unregister_filesystem(&nfs4_fs_type);
340}
341#else
342static int __init register_nfs4_fs(void)
343{
344 return 0;
345}
346
347static void unregister_nfs4_fs(void)
348{
349}
322#endif 350#endif
323 351
324static struct shrinker acl_shrinker = { 352static struct shrinker acl_shrinker = {
@@ -337,12 +365,18 @@ int __init register_nfs_fs(void)
337 if (ret < 0) 365 if (ret < 0)
338 goto error_0; 366 goto error_0;
339 367
340 ret = nfs_register_sysctl(); 368 ret = register_nfs4_fs();
341 if (ret < 0) 369 if (ret < 0)
342 goto error_1; 370 goto error_1;
371
372 ret = nfs_register_sysctl();
373 if (ret < 0)
374 goto error_2;
343 register_shrinker(&acl_shrinker); 375 register_shrinker(&acl_shrinker);
344 return 0; 376 return 0;
345 377
378error_2:
379 unregister_nfs4_fs();
346error_1: 380error_1:
347 unregister_filesystem(&nfs_fs_type); 381 unregister_filesystem(&nfs_fs_type);
348error_0: 382error_0:
@@ -356,6 +390,7 @@ void __exit unregister_nfs_fs(void)
356{ 390{
357 unregister_shrinker(&acl_shrinker); 391 unregister_shrinker(&acl_shrinker);
358 nfs_unregister_sysctl(); 392 nfs_unregister_sysctl();
393 unregister_nfs4_fs();
359 unregister_filesystem(&nfs_fs_type); 394 unregister_filesystem(&nfs_fs_type);
360} 395}
361 396
@@ -2645,4 +2680,6 @@ MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
2645module_param(send_implementation_id, ushort, 0644); 2680module_param(send_implementation_id, ushort, 0644);
2646MODULE_PARM_DESC(send_implementation_id, 2681MODULE_PARM_DESC(send_implementation_id,
2647 "Send implementation ID with NFSv4.1 exchange_id"); 2682 "Send implementation ID with NFSv4.1 exchange_id");
2683MODULE_ALIAS("nfs4");
2684
2648#endif /* CONFIG_NFS_V4 */ 2685#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5829d0ce7cfb..e3b55372726c 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1814,19 +1814,19 @@ int __init nfs_init_writepagecache(void)
1814 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE, 1814 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1815 nfs_wdata_cachep); 1815 nfs_wdata_cachep);
1816 if (nfs_wdata_mempool == NULL) 1816 if (nfs_wdata_mempool == NULL)
1817 return -ENOMEM; 1817 goto out_destroy_write_cache;
1818 1818
1819 nfs_cdata_cachep = kmem_cache_create("nfs_commit_data", 1819 nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
1820 sizeof(struct nfs_commit_data), 1820 sizeof(struct nfs_commit_data),
1821 0, SLAB_HWCACHE_ALIGN, 1821 0, SLAB_HWCACHE_ALIGN,
1822 NULL); 1822 NULL);
1823 if (nfs_cdata_cachep == NULL) 1823 if (nfs_cdata_cachep == NULL)
1824 return -ENOMEM; 1824 goto out_destroy_write_mempool;
1825 1825
1826 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT, 1826 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1827 nfs_wdata_cachep); 1827 nfs_wdata_cachep);
1828 if (nfs_commit_mempool == NULL) 1828 if (nfs_commit_mempool == NULL)
1829 return -ENOMEM; 1829 goto out_destroy_commit_cache;
1830 1830
1831 /* 1831 /*
1832 * NFS congestion size, scale with available memory. 1832 * NFS congestion size, scale with available memory.
@@ -1849,11 +1849,20 @@ int __init nfs_init_writepagecache(void)
1849 nfs_congestion_kb = 256*1024; 1849 nfs_congestion_kb = 256*1024;
1850 1850
1851 return 0; 1851 return 0;
1852
1853out_destroy_commit_cache:
1854 kmem_cache_destroy(nfs_cdata_cachep);
1855out_destroy_write_mempool:
1856 mempool_destroy(nfs_wdata_mempool);
1857out_destroy_write_cache:
1858 kmem_cache_destroy(nfs_wdata_cachep);
1859 return -ENOMEM;
1852} 1860}
1853 1861
1854void nfs_destroy_writepagecache(void) 1862void nfs_destroy_writepagecache(void)
1855{ 1863{
1856 mempool_destroy(nfs_commit_mempool); 1864 mempool_destroy(nfs_commit_mempool);
1865 kmem_cache_destroy(nfs_cdata_cachep);
1857 mempool_destroy(nfs_wdata_mempool); 1866 mempool_destroy(nfs_wdata_mempool);
1858 kmem_cache_destroy(nfs_wdata_cachep); 1867 kmem_cache_destroy(nfs_wdata_cachep);
1859} 1868}
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index cbaf4f8bb7b7..4c7bd35b1876 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -651,12 +651,12 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
651 651
652 if (clp->cl_minorversion == 0) { 652 if (clp->cl_minorversion == 0) {
653 if (!clp->cl_cred.cr_principal && 653 if (!clp->cl_cred.cr_principal &&
654 (clp->cl_flavor >= RPC_AUTH_GSS_KRB5)) 654 (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
655 return -EINVAL; 655 return -EINVAL;
656 args.client_name = clp->cl_cred.cr_principal; 656 args.client_name = clp->cl_cred.cr_principal;
657 args.prognumber = conn->cb_prog, 657 args.prognumber = conn->cb_prog,
658 args.protocol = XPRT_TRANSPORT_TCP; 658 args.protocol = XPRT_TRANSPORT_TCP;
659 args.authflavor = clp->cl_flavor; 659 args.authflavor = clp->cl_cred.cr_flavor;
660 clp->cl_cb_ident = conn->cb_ident; 660 clp->cl_cb_ident = conn->cb_ident;
661 } else { 661 } else {
662 if (!conn->cb_xprt) 662 if (!conn->cb_xprt)
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index e6173147f982..22bd0a66c356 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -231,7 +231,6 @@ struct nfs4_client {
231 nfs4_verifier cl_verifier; /* generated by client */ 231 nfs4_verifier cl_verifier; /* generated by client */
232 time_t cl_time; /* time of last lease renewal */ 232 time_t cl_time; /* time of last lease renewal */
233 struct sockaddr_storage cl_addr; /* client ipaddress */ 233 struct sockaddr_storage cl_addr; /* client ipaddress */
234 u32 cl_flavor; /* setclientid pseudoflavor */
235 struct svc_cred cl_cred; /* setclientid principal */ 234 struct svc_cred cl_cred; /* setclientid principal */
236 clientid_t cl_clientid; /* generated by server */ 235 clientid_t cl_clientid; /* generated by server */
237 nfs4_verifier cl_confirm; /* generated by server */ 236 nfs4_verifier cl_confirm; /* generated by server */
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 36a29b753c79..c495a3055e2a 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1589,10 +1589,10 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1589 goto out; 1589 goto out;
1590 } 1590 }
1591 1591
1592 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1593 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1592 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1594 warn[cnt].w_type = QUOTA_NL_NOWARN; 1593 warn[cnt].w_type = QUOTA_NL_NOWARN;
1595 1594
1595 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1596 spin_lock(&dq_data_lock); 1596 spin_lock(&dq_data_lock);
1597 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1597 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1598 if (!dquots[cnt]) 1598 if (!dquots[cnt])
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index 4c0c7d163d15..a98b7740a0fc 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -1334,9 +1334,7 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
1334 else if (bitmap == 0) 1334 else if (bitmap == 0)
1335 block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1; 1335 block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;
1336 1336
1337 reiserfs_write_unlock(sb);
1338 bh = sb_bread(sb, block); 1337 bh = sb_bread(sb, block);
1339 reiserfs_write_lock(sb);
1340 if (bh == NULL) 1338 if (bh == NULL)
1341 reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) " 1339 reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
1342 "reading failed", __func__, block); 1340 "reading failed", __func__, block);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index a6d4268fb6c1..855da58db145 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -76,10 +76,10 @@ void reiserfs_evict_inode(struct inode *inode)
76 ; 76 ;
77 } 77 }
78 out: 78 out:
79 reiserfs_write_unlock_once(inode->i_sb, depth);
79 clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */ 80 clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */
80 dquot_drop(inode); 81 dquot_drop(inode);
81 inode->i_blocks = 0; 82 inode->i_blocks = 0;
82 reiserfs_write_unlock_once(inode->i_sb, depth);
83 return; 83 return;
84 84
85no_delete: 85no_delete:
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 8b8cc4e945f4..760de723dadb 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -167,7 +167,7 @@ struct ubifs_global_debug_info {
167#define ubifs_dbg_msg(type, fmt, ...) \ 167#define ubifs_dbg_msg(type, fmt, ...) \
168 pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__) 168 pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
169 169
170#define DBG_KEY_BUF_LEN 32 170#define DBG_KEY_BUF_LEN 48
171#define ubifs_dbg_msg_key(type, key, fmt, ...) do { \ 171#define ubifs_dbg_msg_key(type, key, fmt, ...) do { \
172 char __tmp_key_buf[DBG_KEY_BUF_LEN]; \ 172 char __tmp_key_buf[DBG_KEY_BUF_LEN]; \
173 pr_debug("UBIFS DBG " type ": " fmt "%s\n", ##__VA_ARGS__, \ 173 pr_debug("UBIFS DBG " type ": " fmt "%s\n", ##__VA_ARGS__, \
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index ce33b2beb151..8640920766ed 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -1749,7 +1749,10 @@ int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr)
1749 return 0; 1749 return 0;
1750 1750
1751out_err: 1751out_err:
1752 ubifs_lpt_free(c, 0); 1752 if (wr)
1753 ubifs_lpt_free(c, 1);
1754 if (rd)
1755 ubifs_lpt_free(c, 0);
1753 return err; 1756 return err;
1754} 1757}
1755 1758
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index c30d976b4be8..edeec499c048 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -788,7 +788,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
788 788
789corrupted_rescan: 789corrupted_rescan:
790 /* Re-scan the corrupted data with verbose messages */ 790 /* Re-scan the corrupted data with verbose messages */
791 ubifs_err("corruptio %d", ret); 791 ubifs_err("corruption %d", ret);
792 ubifs_scan_a_node(c, buf, len, lnum, offs, 1); 792 ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
793corrupted: 793corrupted:
794 ubifs_scanned_corruption(c, lnum, offs, buf); 794 ubifs_scanned_corruption(c, lnum, offs, buf);
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index eba46d4a7619..94d78fc5d4e0 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -1026,7 +1026,6 @@ int ubifs_replay_journal(struct ubifs_info *c)
1026 c->replaying = 1; 1026 c->replaying = 1;
1027 lnum = c->ltail_lnum = c->lhead_lnum; 1027 lnum = c->ltail_lnum = c->lhead_lnum;
1028 1028
1029 lnum = UBIFS_LOG_LNUM;
1030 do { 1029 do {
1031 err = replay_log_leb(c, lnum, 0, c->sbuf); 1030 err = replay_log_leb(c, lnum, 0, c->sbuf);
1032 if (err == 1) 1031 if (err == 1)
@@ -1035,7 +1034,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
1035 if (err) 1034 if (err)
1036 goto out; 1035 goto out;
1037 lnum = ubifs_next_log_lnum(c, lnum); 1036 lnum = ubifs_next_log_lnum(c, lnum);
1038 } while (lnum != UBIFS_LOG_LNUM); 1037 } while (lnum != c->ltail_lnum);
1039 1038
1040 err = replay_buds(c); 1039 err = replay_buds(c);
1041 if (err) 1040 if (err)
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index c3fa6c5327a3..71a197f0f93d 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1157,9 +1157,6 @@ static int check_free_space(struct ubifs_info *c)
1157 * 1157 *
1158 * This function mounts UBIFS file system. Returns zero in case of success and 1158 * This function mounts UBIFS file system. Returns zero in case of success and
1159 * a negative error code in case of failure. 1159 * a negative error code in case of failure.
1160 *
1161 * Note, the function does not de-allocate resources it it fails half way
1162 * through, and the caller has to do this instead.
1163 */ 1160 */
1164static int mount_ubifs(struct ubifs_info *c) 1161static int mount_ubifs(struct ubifs_info *c)
1165{ 1162{
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index fafaad795cd6..aa233469b3c1 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1124,14 +1124,17 @@ int udf_setsize(struct inode *inode, loff_t newsize)
1124 if (err) 1124 if (err)
1125 return err; 1125 return err;
1126 down_write(&iinfo->i_data_sem); 1126 down_write(&iinfo->i_data_sem);
1127 } else 1127 } else {
1128 iinfo->i_lenAlloc = newsize; 1128 iinfo->i_lenAlloc = newsize;
1129 goto set_size;
1130 }
1129 } 1131 }
1130 err = udf_extend_file(inode, newsize); 1132 err = udf_extend_file(inode, newsize);
1131 if (err) { 1133 if (err) {
1132 up_write(&iinfo->i_data_sem); 1134 up_write(&iinfo->i_data_sem);
1133 return err; 1135 return err;
1134 } 1136 }
1137set_size:
1135 truncate_setsize(inode, newsize); 1138 truncate_setsize(inode, newsize);
1136 up_write(&iinfo->i_data_sem); 1139 up_write(&iinfo->i_data_sem);
1137 } else { 1140 } else {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index dcbf98722afc..18fc038a438d 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1344,6 +1344,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1344 udf_err(sb, "error loading logical volume descriptor: " 1344 udf_err(sb, "error loading logical volume descriptor: "
1345 "Partition table too long (%u > %lu)\n", table_len, 1345 "Partition table too long (%u > %lu)\n", table_len,
1346 sb->s_blocksize - sizeof(*lvd)); 1346 sb->s_blocksize - sizeof(*lvd));
1347 ret = 1;
1347 goto out_bh; 1348 goto out_bh;
1348 } 1349 }
1349 1350
@@ -1388,8 +1389,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1388 UDF_ID_SPARABLE, 1389 UDF_ID_SPARABLE,
1389 strlen(UDF_ID_SPARABLE))) { 1390 strlen(UDF_ID_SPARABLE))) {
1390 if (udf_load_sparable_map(sb, map, 1391 if (udf_load_sparable_map(sb, map,
1391 (struct sparablePartitionMap *)gpm) < 0) 1392 (struct sparablePartitionMap *)gpm) < 0) {
1393 ret = 1;
1392 goto out_bh; 1394 goto out_bh;
1395 }
1393 } else if (!strncmp(upm2->partIdent.ident, 1396 } else if (!strncmp(upm2->partIdent.ident,
1394 UDF_ID_METADATA, 1397 UDF_ID_METADATA,
1395 strlen(UDF_ID_METADATA))) { 1398 strlen(UDF_ID_METADATA))) {
@@ -2000,6 +2003,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
2000 if (!silent) 2003 if (!silent)
2001 pr_notice("Rescanning with blocksize %d\n", 2004 pr_notice("Rescanning with blocksize %d\n",
2002 UDF_DEFAULT_BLOCKSIZE); 2005 UDF_DEFAULT_BLOCKSIZE);
2006 brelse(sbi->s_lvid_bh);
2007 sbi->s_lvid_bh = NULL;
2003 uopt.blocksize = UDF_DEFAULT_BLOCKSIZE; 2008 uopt.blocksize = UDF_DEFAULT_BLOCKSIZE;
2004 ret = udf_load_vrs(sb, &uopt, silent, &fileset); 2009 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2005 } 2010 }
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index f9c3fe304a17..69cf4fcde03e 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -179,12 +179,14 @@ xfs_ioc_trim(
179 * used by the fstrim application. In the end it really doesn't 179 * used by the fstrim application. In the end it really doesn't
180 * matter as trimming blocks is an advisory interface. 180 * matter as trimming blocks is an advisory interface.
181 */ 181 */
182 if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
183 range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)))
184 return -XFS_ERROR(EINVAL);
185
182 start = BTOBB(range.start); 186 start = BTOBB(range.start);
183 end = start + BTOBBT(range.len) - 1; 187 end = start + BTOBBT(range.len) - 1;
184 minlen = BTOBB(max_t(u64, granularity, range.minlen)); 188 minlen = BTOBB(max_t(u64, granularity, range.minlen));
185 189
186 if (XFS_BB_TO_FSB(mp, start) >= mp->m_sb.sb_dblocks)
187 return -XFS_ERROR(EINVAL);
188 if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1) 190 if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
189 end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1; 191 end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1;
190 192
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 21e37b55f7e5..5aceb3f8ecd6 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -962,23 +962,22 @@ xfs_dialloc(
962 if (!pag->pagi_freecount && !okalloc) 962 if (!pag->pagi_freecount && !okalloc)
963 goto nextag; 963 goto nextag;
964 964
965 /*
966 * Then read in the AGI buffer and recheck with the AGI buffer
967 * lock held.
968 */
965 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); 969 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
966 if (error) 970 if (error)
967 goto out_error; 971 goto out_error;
968 972
969 /*
970 * Once the AGI has been read in we have to recheck
971 * pagi_freecount with the AGI buffer lock held.
972 */
973 if (pag->pagi_freecount) { 973 if (pag->pagi_freecount) {
974 xfs_perag_put(pag); 974 xfs_perag_put(pag);
975 goto out_alloc; 975 goto out_alloc;
976 } 976 }
977 977
978 if (!okalloc) { 978 if (!okalloc)
979 xfs_trans_brelse(tp, agbp); 979 goto nextag_relse_buffer;
980 goto nextag; 980
981 }
982 981
983 error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced); 982 error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);
984 if (error) { 983 if (error) {
@@ -1007,6 +1006,8 @@ xfs_dialloc(
1007 return 0; 1006 return 0;
1008 } 1007 }
1009 1008
1009nextag_relse_buffer:
1010 xfs_trans_brelse(tp, agbp);
1010nextag: 1011nextag:
1011 xfs_perag_put(pag); 1012 xfs_perag_put(pag);
1012 if (++agno == mp->m_sb.sb_agcount) 1013 if (++agno == mp->m_sb.sb_agcount)
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 92d4331cd4f1..ca28a4ba4b54 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -857,7 +857,7 @@ xfs_rtbuf_get(
857 xfs_buf_t *bp; /* block buffer, result */ 857 xfs_buf_t *bp; /* block buffer, result */
858 xfs_inode_t *ip; /* bitmap or summary inode */ 858 xfs_inode_t *ip; /* bitmap or summary inode */
859 xfs_bmbt_irec_t map; 859 xfs_bmbt_irec_t map;
860 int nmap; 860 int nmap = 1;
861 int error; /* error value */ 861 int error; /* error value */
862 862
863 ip = issum ? mp->m_rsumip : mp->m_rbmip; 863 ip = issum ? mp->m_rsumip : mp->m_rbmip;
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 580a6d35c700..c04e0db8a2d6 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -26,7 +26,13 @@ static inline void
26__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) 26__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
27{ 27{
28 if (unlikely(atomic_xchg(count, 0) != 1)) 28 if (unlikely(atomic_xchg(count, 0) != 1))
29 fail_fn(count); 29 /*
30 * We failed to acquire the lock, so mark it contended
31 * to ensure that any waiting tasks are woken up by the
32 * unlock slow path.
33 */
34 if (likely(atomic_xchg(count, -1) != 1))
35 fail_fn(count);
30} 36}
31 37
32/** 38/**
@@ -43,7 +49,8 @@ static inline int
43__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) 49__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
44{ 50{
45 if (unlikely(atomic_xchg(count, 0) != 1)) 51 if (unlikely(atomic_xchg(count, 0) != 1))
46 return fail_fn(count); 52 if (likely(atomic_xchg(count, -1) != 1))
53 return fail_fn(count);
47 return 0; 54 return 0;
48} 55}
49 56
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index a1a0386e0160..bfacf0d5a225 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -118,7 +118,8 @@ enum drm_mode_status {
118 .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ 118 .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
119 .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ 119 .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
120 .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ 120 .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
121 .vscan = (vs), .flags = (f), .vrefresh = 0 121 .vscan = (vs), .flags = (f), .vrefresh = 0, \
122 .base.type = DRM_MODE_OBJECT_MODE
122 123
123#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */ 124#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
124 125
@@ -166,8 +167,6 @@ struct drm_display_mode {
166 int crtc_vsync_start; 167 int crtc_vsync_start;
167 int crtc_vsync_end; 168 int crtc_vsync_end;
168 int crtc_vtotal; 169 int crtc_vtotal;
169 int crtc_hadjusted;
170 int crtc_vadjusted;
171 170
172 /* Driver private mode info */ 171 /* Driver private mode info */
173 int private_size; 172 int private_size;
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 5581980b14f6..3d6301b6ec16 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -359,8 +359,9 @@ struct drm_mode_mode_cmd {
359 struct drm_mode_modeinfo mode; 359 struct drm_mode_modeinfo mode;
360}; 360};
361 361
362#define DRM_MODE_CURSOR_BO (1<<0) 362#define DRM_MODE_CURSOR_BO 0x01
363#define DRM_MODE_CURSOR_MOVE (1<<1) 363#define DRM_MODE_CURSOR_MOVE 0x02
364#define DRM_MODE_CURSOR_FLAGS 0x03
364 365
365/* 366/*
366 * depending on the value in flags different members are used. 367 * depending on the value in flags different members are used.
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4e72a9d48232..4a2ab7c85393 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -601,7 +601,7 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
601 * it already be started by driver. 601 * it already be started by driver.
602 */ 602 */
603#define RQ_NOMERGE_FLAGS \ 603#define RQ_NOMERGE_FLAGS \
604 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) 604 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_DISCARD)
605#define rq_mergeable(rq) \ 605#define rq_mergeable(rq) \
606 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ 606 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
607 (((rq)->cmd_flags & REQ_DISCARD) || \ 607 (((rq)->cmd_flags & REQ_DISCARD) || \
@@ -894,6 +894,8 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
894extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 894extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
895 895
896extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 896extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
897extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
898 struct scatterlist *sglist);
897extern void blk_dump_rq_flags(struct request *, char *); 899extern void blk_dump_rq_flags(struct request *, char *);
898extern long nr_blockdev_pages(void); 900extern long nr_blockdev_pages(void);
899 901
@@ -1139,6 +1141,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
1139 & (lim->discard_granularity - 1); 1141 & (lim->discard_granularity - 1);
1140} 1142}
1141 1143
1144static inline int bdev_discard_alignment(struct block_device *bdev)
1145{
1146 struct request_queue *q = bdev_get_queue(bdev);
1147
1148 if (bdev != bdev->bd_contains)
1149 return bdev->bd_part->discard_alignment;
1150
1151 return q->limits.discard_alignment;
1152}
1153
1142static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) 1154static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
1143{ 1155{
1144 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) 1156 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
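
The new bdev_discard_alignment() helper above answers the alignment question per partition rather than per whole disk. A minimal, purely illustrative caller might look like the sketch below; the function name and message are assumptions, not part of this patch.

#include <linux/blkdev.h>

static void report_discard_alignment(struct block_device *bdev)
{
	/* For a partition this now reflects the partition's own offset. */
	pr_info("%s: discard alignment is %d bytes\n",
		bdev->bd_disk->disk_name, bdev_discard_alignment(bdev));
}
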
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 133ddcf83397..ef658147e4e8 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
22extern int fragmentation_index(struct zone *zone, unsigned int order); 22extern int fragmentation_index(struct zone *zone, unsigned int order);
23extern unsigned long try_to_compact_pages(struct zonelist *zonelist, 23extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
24 int order, gfp_t gfp_mask, nodemask_t *mask, 24 int order, gfp_t gfp_mask, nodemask_t *mask,
25 bool sync); 25 bool sync, bool *contended);
26extern int compact_pgdat(pg_data_t *pgdat, int order); 26extern int compact_pgdat(pg_data_t *pgdat, int order);
27extern unsigned long compaction_suitable(struct zone *zone, int order); 27extern unsigned long compaction_suitable(struct zone *zone, int order);
28 28
@@ -64,7 +64,7 @@ static inline bool compaction_deferred(struct zone *zone, int order)
64#else 64#else
65static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, 65static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
66 int order, gfp_t gfp_mask, nodemask_t *nodemask, 66 int order, gfp_t gfp_mask, nodemask_t *nodemask,
67 bool sync) 67 bool sync, bool *contended)
68{ 68{
69 return COMPACT_CONTINUE; 69 return COMPACT_CONTINUE;
70} 70}
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 040b13b5c14a..279b1eaa8b73 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -194,6 +194,10 @@ static inline int cpuidle_play_dead(void) {return -ENODEV; }
194 194
195#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED 195#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
196void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a); 196void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
197#else
198static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
199{
200}
197#endif 201#endif
198 202
199/****************************** 203/******************************
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 103adc6d7e3a..ec45ccd8708a 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -503,6 +503,8 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
503extern int __init efi_uart_console_only (void); 503extern int __init efi_uart_console_only (void);
504extern void efi_initialize_iomem_resources(struct resource *code_resource, 504extern void efi_initialize_iomem_resources(struct resource *code_resource,
505 struct resource *data_resource, struct resource *bss_resource); 505 struct resource *data_resource, struct resource *bss_resource);
506extern unsigned long efi_get_time(void);
507extern int efi_set_rtc_mmss(unsigned long nowtime);
506extern void efi_reserve_boot_services(void); 508extern void efi_reserve_boot_services(void);
507extern struct efi_memory_map memmap; 509extern struct efi_memory_map memmap;
508 510
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 6960fc1841a7..aa2e167e1ef4 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -96,21 +96,6 @@ static inline void team_netpoll_send_skb(struct team_port *port,
96} 96}
97#endif 97#endif
98 98
99static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
100 struct sk_buff *skb)
101{
102 BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
103 sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
104 skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
105
106 skb->dev = port->dev;
107 if (unlikely(netpoll_tx_running(port->dev))) {
108 team_netpoll_send_skb(port, skb);
109 return 0;
110 }
111 return dev_queue_xmit(skb);
112}
113
114struct team_mode_ops { 99struct team_mode_ops {
115 int (*init)(struct team *team); 100 int (*init)(struct team *team);
116 void (*exit)(struct team *team); 101 void (*exit)(struct team *team);
@@ -200,6 +185,21 @@ struct team {
200 long mode_priv[TEAM_MODE_PRIV_LONGS]; 185 long mode_priv[TEAM_MODE_PRIV_LONGS];
201}; 186};
202 187
188static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
189 struct sk_buff *skb)
190{
191 BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
192 sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
193 skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
194
195 skb->dev = port->dev;
196 if (unlikely(netpoll_tx_running(team->dev))) {
197 team_netpoll_send_skb(port, skb);
198 return 0;
199 }
200 return dev_queue_xmit(skb);
201}
202
203static inline struct hlist_head *team_port_index_hash(struct team *team, 203static inline struct hlist_head *team_port_index_hash(struct team *team,
204 int port_index) 204 int port_index)
205{ 205{
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 9c07dcebded7..65af6887872f 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -18,6 +18,7 @@
18#include <linux/bug.h> 18#include <linux/bug.h>
19#include <linux/atomic.h> 19#include <linux/atomic.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/mutex.h>
21 22
22struct kref { 23struct kref {
23 atomic_t refcount; 24 atomic_t refcount;
@@ -93,4 +94,21 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
93{ 94{
94 return kref_sub(kref, 1, release); 95 return kref_sub(kref, 1, release);
95} 96}
97
98static inline int kref_put_mutex(struct kref *kref,
99 void (*release)(struct kref *kref),
100 struct mutex *lock)
101{
102 WARN_ON(release == NULL);
103 if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
104 mutex_lock(lock);
105 if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
106 mutex_unlock(lock);
107 return 0;
108 }
109 release(kref);
110 return 1;
111 }
112 return 0;
113}
96#endif /* _KREF_H_ */ 114#endif /* _KREF_H_ */
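
kref_put_mutex(), added above, serves the common pattern where an object is found through a mutex-protected structure: the mutex is taken only when the count is about to reach zero, and the release callback then runs with the mutex held (and is responsible for dropping it), so a concurrent lookup can never pick up a reference to an object that is already being torn down. A hedged usage sketch follows; struct widget, widget_lock and widget_list are illustrative names, not part of this patch.

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Illustrative object protected by a global lookup mutex. */
struct widget {
	struct kref	 ref;
	struct list_head node;
};

static DEFINE_MUTEX(widget_lock);
static LIST_HEAD(widget_list);

/* Called with widget_lock held; must drop it before freeing. */
static void widget_release(struct kref *ref)
{
	struct widget *w = container_of(ref, struct widget, ref);

	list_del(&w->node);
	mutex_unlock(&widget_lock);
	kfree(w);
}

static void widget_put(struct widget *w)
{
	/*
	 * Takes widget_lock only when the count is about to hit zero,
	 * so lookups holding widget_lock never see a half-dead object.
	 */
	kref_put_mutex(&w->ref, widget_release, &widget_lock);
}
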
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 603bec2913b0..06177ba10a16 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -58,13 +58,6 @@ union ktime {
58 58
59typedef union ktime ktime_t; /* Kill this */ 59typedef union ktime ktime_t; /* Kill this */
60 60
61#define KTIME_MAX ((s64)~((u64)1 << 63))
62#if (BITS_PER_LONG == 64)
63# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
64#else
65# define KTIME_SEC_MAX LONG_MAX
66#endif
67
68/* 61/*
69 * ktime_t definitions when using the 64-bit scalar representation: 62 * ktime_t definitions when using the 64-bit scalar representation:
70 */ 63 */
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 51bf8ada6dc0..49258e0ed1c6 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -15,6 +15,8 @@
15#define MV643XX_ETH_SIZE_REG_4 0x2224 15#define MV643XX_ETH_SIZE_REG_4 0x2224
16#define MV643XX_ETH_BASE_ADDR_ENABLE_REG 0x2290 16#define MV643XX_ETH_BASE_ADDR_ENABLE_REG 0x2290
17 17
18#define MV643XX_TX_CSUM_DEFAULT_LIMIT 0
19
18struct mv643xx_eth_shared_platform_data { 20struct mv643xx_eth_shared_platform_data {
19 struct mbus_dram_target_info *dram; 21 struct mbus_dram_target_info *dram;
20 struct platform_device *shared_smi; 22 struct platform_device *shared_smi;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a9db4f33407f..59dc05f38247 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -953,7 +953,8 @@ struct net_device_ops {
953#ifdef CONFIG_NET_POLL_CONTROLLER 953#ifdef CONFIG_NET_POLL_CONTROLLER
954 void (*ndo_poll_controller)(struct net_device *dev); 954 void (*ndo_poll_controller)(struct net_device *dev);
955 int (*ndo_netpoll_setup)(struct net_device *dev, 955 int (*ndo_netpoll_setup)(struct net_device *dev,
956 struct netpoll_info *info); 956 struct netpoll_info *info,
957 gfp_t gfp);
957 void (*ndo_netpoll_cleanup)(struct net_device *dev); 958 void (*ndo_netpoll_cleanup)(struct net_device *dev);
958#endif 959#endif
959 int (*ndo_set_vf_mac)(struct net_device *dev, 960 int (*ndo_set_vf_mac)(struct net_device *dev,
@@ -1521,6 +1522,8 @@ struct packet_type {
1521 struct sk_buff **(*gro_receive)(struct sk_buff **head, 1522 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1522 struct sk_buff *skb); 1523 struct sk_buff *skb);
1523 int (*gro_complete)(struct sk_buff *skb); 1524 int (*gro_complete)(struct sk_buff *skb);
1525 bool (*id_match)(struct packet_type *ptype,
1526 struct sock *sk);
1524 void *af_packet_priv; 1527 void *af_packet_priv;
1525 struct list_head list; 1528 struct list_head list;
1526}; 1529};
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index 0dfc8b7210a3..89f2a627f3f0 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -164,7 +164,7 @@ extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr
164 unsigned int dataoff, unsigned int datalen, 164 unsigned int dataoff, unsigned int datalen,
165 const char *name, 165 const char *name,
166 unsigned int *matchoff, unsigned int *matchlen, 166 unsigned int *matchoff, unsigned int *matchlen,
167 union nf_inet_addr *addr); 167 union nf_inet_addr *addr, bool delim);
168extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, 168extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
169 unsigned int off, unsigned int datalen, 169 unsigned int off, unsigned int datalen,
170 const char *name, 170 const char *name,
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 28f5389c924b..66d5379c305e 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -23,6 +23,7 @@ struct netpoll {
23 u8 remote_mac[ETH_ALEN]; 23 u8 remote_mac[ETH_ALEN];
24 24
25 struct list_head rx; /* rx_np list element */ 25 struct list_head rx; /* rx_np list element */
26 struct rcu_head rcu;
26}; 27};
27 28
28struct netpoll_info { 29struct netpoll_info {
@@ -38,28 +39,40 @@ struct netpoll_info {
38 struct delayed_work tx_work; 39 struct delayed_work tx_work;
39 40
40 struct netpoll *netpoll; 41 struct netpoll *netpoll;
42 struct rcu_head rcu;
41}; 43};
42 44
43void netpoll_send_udp(struct netpoll *np, const char *msg, int len); 45void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
44void netpoll_print_options(struct netpoll *np); 46void netpoll_print_options(struct netpoll *np);
45int netpoll_parse_options(struct netpoll *np, char *opt); 47int netpoll_parse_options(struct netpoll *np, char *opt);
46int __netpoll_setup(struct netpoll *np, struct net_device *ndev); 48int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
47int netpoll_setup(struct netpoll *np); 49int netpoll_setup(struct netpoll *np);
48int netpoll_trap(void); 50int netpoll_trap(void);
49void netpoll_set_trap(int trap); 51void netpoll_set_trap(int trap);
50void __netpoll_cleanup(struct netpoll *np); 52void __netpoll_cleanup(struct netpoll *np);
53void __netpoll_free_rcu(struct netpoll *np);
51void netpoll_cleanup(struct netpoll *np); 54void netpoll_cleanup(struct netpoll *np);
52int __netpoll_rx(struct sk_buff *skb); 55int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
53void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, 56void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
54 struct net_device *dev); 57 struct net_device *dev);
55static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) 58static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
56{ 59{
60 unsigned long flags;
61 local_irq_save(flags);
57 netpoll_send_skb_on_dev(np, skb, np->dev); 62 netpoll_send_skb_on_dev(np, skb, np->dev);
63 local_irq_restore(flags);
58} 64}
59 65
60 66
61 67
62#ifdef CONFIG_NETPOLL 68#ifdef CONFIG_NETPOLL
69static inline bool netpoll_rx_on(struct sk_buff *skb)
70{
71 struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
72
73 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
74}
75
63static inline bool netpoll_rx(struct sk_buff *skb) 76static inline bool netpoll_rx(struct sk_buff *skb)
64{ 77{
65 struct netpoll_info *npinfo; 78 struct netpoll_info *npinfo;
@@ -67,14 +80,14 @@ static inline bool netpoll_rx(struct sk_buff *skb)
67 bool ret = false; 80 bool ret = false;
68 81
69 local_irq_save(flags); 82 local_irq_save(flags);
70 npinfo = rcu_dereference_bh(skb->dev->npinfo);
71 83
72 if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) 84 if (!netpoll_rx_on(skb))
73 goto out; 85 goto out;
74 86
87 npinfo = rcu_dereference_bh(skb->dev->npinfo);
75 spin_lock(&npinfo->rx_lock); 88 spin_lock(&npinfo->rx_lock);
76 /* check rx_flags again with the lock held */ 89 /* check rx_flags again with the lock held */
77 if (npinfo->rx_flags && __netpoll_rx(skb)) 90 if (npinfo->rx_flags && __netpoll_rx(skb, npinfo))
78 ret = true; 91 ret = true;
79 spin_unlock(&npinfo->rx_lock); 92 spin_unlock(&npinfo->rx_lock);
80 93
@@ -83,13 +96,6 @@ out:
83 return ret; 96 return ret;
84} 97}
85 98
86static inline int netpoll_rx_on(struct sk_buff *skb)
87{
88 struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
89
90 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
91}
92
93static inline int netpoll_receive_skb(struct sk_buff *skb) 99static inline int netpoll_receive_skb(struct sk_buff *skb)
94{ 100{
95 if (!list_empty(&skb->dev->napi_list)) 101 if (!list_empty(&skb->dev->napi_list))
@@ -119,7 +125,7 @@ static inline void netpoll_poll_unlock(void *have)
119 } 125 }
120} 126}
121 127
122static inline int netpoll_tx_running(struct net_device *dev) 128static inline bool netpoll_tx_running(struct net_device *dev)
123{ 129{
124 return irqs_disabled(); 130 return irqs_disabled();
125} 131}
@@ -127,11 +133,11 @@ static inline int netpoll_tx_running(struct net_device *dev)
127#else 133#else
128static inline bool netpoll_rx(struct sk_buff *skb) 134static inline bool netpoll_rx(struct sk_buff *skb)
129{ 135{
130 return 0; 136 return false;
131} 137}
132static inline int netpoll_rx_on(struct sk_buff *skb) 138static inline bool netpoll_rx_on(struct sk_buff *skb)
133{ 139{
134 return 0; 140 return false;
135} 141}
136static inline int netpoll_receive_skb(struct sk_buff *skb) 142static inline int netpoll_receive_skb(struct sk_buff *skb)
137{ 143{
@@ -147,9 +153,9 @@ static inline void netpoll_poll_unlock(void *have)
147static inline void netpoll_netdev_init(struct net_device *dev) 153static inline void netpoll_netdev_init(struct net_device *dev)
148{ 154{
149} 155}
150static inline int netpoll_tx_running(struct net_device *dev) 156static inline bool netpoll_tx_running(struct net_device *dev)
151{ 157{
152 return 0; 158 return false;
153} 159}
154#endif 160#endif
155 161
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 880805774f9f..92ce5783b707 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -69,6 +69,7 @@ struct nfs_pageio_descriptor {
69 const struct nfs_pgio_completion_ops *pg_completion_ops; 69 const struct nfs_pgio_completion_ops *pg_completion_ops;
70 struct pnfs_layout_segment *pg_lseg; 70 struct pnfs_layout_segment *pg_lseg;
71 struct nfs_direct_req *pg_dreq; 71 struct nfs_direct_req *pg_dreq;
72 void *pg_layout_private;
72}; 73};
73 74
74#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) 75#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 00485e084394..ac7c8ae254f2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1248,6 +1248,7 @@ struct nfs_pgio_header {
1248 void (*release) (struct nfs_pgio_header *hdr); 1248 void (*release) (struct nfs_pgio_header *hdr);
1249 const struct nfs_pgio_completion_ops *completion_ops; 1249 const struct nfs_pgio_completion_ops *completion_ops;
1250 struct nfs_direct_req *dreq; 1250 struct nfs_direct_req *dreq;
1251 void *layout_private;
1251 spinlock_t lock; 1252 spinlock_t lock;
1252 /* fields protected by lock */ 1253 /* fields protected by lock */
1253 int pnfs_error; 1254 int pnfs_error;
diff --git a/include/linux/of.h b/include/linux/of.h
index 5919ee33f2b7..1b1163225f3b 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -190,10 +190,17 @@ extern struct device_node *of_get_parent(const struct device_node *node);
190extern struct device_node *of_get_next_parent(struct device_node *node); 190extern struct device_node *of_get_next_parent(struct device_node *node);
191extern struct device_node *of_get_next_child(const struct device_node *node, 191extern struct device_node *of_get_next_child(const struct device_node *node,
192 struct device_node *prev); 192 struct device_node *prev);
193extern struct device_node *of_get_next_available_child(
194 const struct device_node *node, struct device_node *prev);
195
193#define for_each_child_of_node(parent, child) \ 196#define for_each_child_of_node(parent, child) \
194 for (child = of_get_next_child(parent, NULL); child != NULL; \ 197 for (child = of_get_next_child(parent, NULL); child != NULL; \
195 child = of_get_next_child(parent, child)) 198 child = of_get_next_child(parent, child))
196 199
200#define for_each_available_child_of_node(parent, child) \
201 for (child = of_get_next_available_child(parent, NULL); child != NULL; \
202 child = of_get_next_available_child(parent, child))
203
197static inline int of_get_child_count(const struct device_node *np) 204static inline int of_get_child_count(const struct device_node *np)
198{ 205{
199 struct device_node *child; 206 struct device_node *child;
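
for_each_available_child_of_node(), added above, visits only children whose status property marks them as available, whereas the plain for_each_child_of_node() also walks disabled nodes. A small illustrative helper (the name and use case are assumptions):

#include <linux/of.h>

static int count_enabled_children(struct device_node *parent)
{
	struct device_node *child;
	int n = 0;

	/* Skips children with status = "disabled"; node refs are handled by the iterator. */
	for_each_available_child_of_node(parent, child)
		n++;

	return n;
}
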
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index fc3526077348..6b4565c440c8 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2149,7 +2149,7 @@
2149#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 2149#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
2150#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 2150#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9
2151#define PCI_DEVICE_ID_NX2_5706S 0x16aa 2151#define PCI_DEVICE_ID_NX2_5706S 0x16aa
2152#define PCI_DEVICE_ID_NX2_57840_MF 0x16ab 2152#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4
2153#define PCI_DEVICE_ID_NX2_5708S 0x16ac 2153#define PCI_DEVICE_ID_NX2_5708S 0x16ac
2154#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad 2154#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad
2155#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae 2155#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 6dd96fb45482..e9b7f4350844 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -20,6 +20,7 @@
20/* This struct is private to the core and should be regarded as a cookie */ 20/* This struct is private to the core and should be regarded as a cookie */
21struct pinctrl; 21struct pinctrl;
22struct pinctrl_state; 22struct pinctrl_state;
23struct device;
23 24
24#ifdef CONFIG_PINCTRL 25#ifdef CONFIG_PINCTRL
25 26
diff --git a/include/linux/string.h b/include/linux/string.h
index ffe0442e18d2..b9178812d9df 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -144,8 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
144{ 144{
145 return strncmp(str, prefix, strlen(prefix)) == 0; 145 return strncmp(str, prefix, strlen(prefix)) == 0;
146} 146}
147#endif
148 147
149extern size_t memweight(const void *ptr, size_t bytes); 148extern size_t memweight(const void *ptr, size_t bytes);
150 149
150#endif /* __KERNEL__ */
151#endif /* _LINUX_STRING_H_ */ 151#endif /* _LINUX_STRING_H_ */
diff --git a/include/linux/time.h b/include/linux/time.h
index c81c5e40fcb5..b51e664c83e7 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -107,11 +107,36 @@ static inline struct timespec timespec_sub(struct timespec lhs,
107 return ts_delta; 107 return ts_delta;
108} 108}
109 109
110#define KTIME_MAX ((s64)~((u64)1 << 63))
111#if (BITS_PER_LONG == 64)
112# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
113#else
114# define KTIME_SEC_MAX LONG_MAX
115#endif
116
110/* 117/*
111 * Returns true if the timespec is norm, false if denorm: 118 * Returns true if the timespec is norm, false if denorm:
112 */ 119 */
113#define timespec_valid(ts) \ 120static inline bool timespec_valid(const struct timespec *ts)
114 (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) 121{
122 /* Dates before 1970 are bogus */
123 if (ts->tv_sec < 0)
124 return false;
 125 /* Can't have more nanoseconds than a second */
126 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
127 return false;
128 return true;
129}
130
131static inline bool timespec_valid_strict(const struct timespec *ts)
132{
133 if (!timespec_valid(ts))
134 return false;
135 /* Disallow values that could overflow ktime_t */
136 if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
137 return false;
138 return true;
139}
115 140
116extern void read_persistent_clock(struct timespec *ts); 141extern void read_persistent_clock(struct timespec *ts);
117extern void read_boot_clock(struct timespec *ts); 142extern void read_boot_clock(struct timespec *ts);
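
timespec_valid() above keeps its old meaning (non-negative seconds, normalized nanoseconds), while timespec_valid_strict() additionally rejects second values large enough to overflow a signed 64-bit nanosecond count; do_settimeofday() and the sleeptime-injection path switch to the strict form. A user-space sketch of the distinction, assuming a 64-bit time_t and the same KTIME_SEC_MAX bound (a model, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC	1000000000L
#define KTIME_MAX	((int64_t)~((uint64_t)1 << 63))
#define KTIME_SEC_MAX	(KTIME_MAX / NSEC_PER_SEC)

static bool ts_valid(const struct timespec *ts)
{
	return ts->tv_sec >= 0 && (unsigned long)ts->tv_nsec < NSEC_PER_SEC;
}

static bool ts_valid_strict(const struct timespec *ts)
{
	/* Also refuse values that would overflow a 64-bit ktime_t. */
	return ts_valid(ts) &&
	       (unsigned long long)ts->tv_sec < KTIME_SEC_MAX;
}

int main(void)
{
	struct timespec far_future = { .tv_sec = KTIME_SEC_MAX, .tv_nsec = 0 };

	/* Prints "valid=1 strict=0". */
	printf("valid=%d strict=%d\n",
	       ts_valid(&far_future), ts_valid_strict(&far_future));
	return 0;
}
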
diff --git a/include/net/llc.h b/include/net/llc.h
index 226c846cab08..f2d0fc570527 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -133,7 +133,7 @@ extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
133extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb); 133extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
134extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb); 134extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
135 135
136extern int llc_station_init(void); 136extern void llc_station_init(void);
137extern void llc_station_exit(void); 137extern void llc_station_exit(void);
138 138
139#ifdef CONFIG_PROC_FS 139#ifdef CONFIG_PROC_FS
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index e1ce1048fe5f..4a045cda9c60 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
18 u16 ctmask; /* bitmask of ct events to be delivered */ 18 u16 ctmask; /* bitmask of ct events to be delivered */
19 u16 expmask; /* bitmask of expect events to be delivered */ 19 u16 expmask; /* bitmask of expect events to be delivered */
20 u32 pid; /* netlink pid of destroyer */ 20 u32 pid; /* netlink pid of destroyer */
21 struct timer_list timeout;
21}; 22};
22 23
23static inline struct nf_conntrack_ecache * 24static inline struct nf_conntrack_ecache *
diff --git a/include/net/scm.h b/include/net/scm.h
index 079d7887dac1..7dc0854f0b38 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -70,9 +70,11 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)
70} 70}
71 71
72static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, 72static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
73 struct scm_cookie *scm) 73 struct scm_cookie *scm, bool forcecreds)
74{ 74{
75 memset(scm, 0, sizeof(*scm)); 75 memset(scm, 0, sizeof(*scm));
76 if (forcecreds)
77 scm_set_cred(scm, task_tgid(current), current_cred());
76 unix_get_peersec_dgram(sock, scm); 78 unix_get_peersec_dgram(sock, scm);
77 if (msg->msg_controllen <= 0) 79 if (msg->msg_controllen <= 0)
78 return 0; 80 return 0;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 62b619e82a90..976a81abe1a2 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -292,6 +292,8 @@ struct xfrm_policy_afinfo {
292 struct flowi *fl, 292 struct flowi *fl,
293 int reverse); 293 int reverse);
294 int (*get_tos)(const struct flowi *fl); 294 int (*get_tos)(const struct flowi *fl);
295 void (*init_dst)(struct net *net,
296 struct xfrm_dst *dst);
295 int (*init_path)(struct xfrm_dst *path, 297 int (*init_path)(struct xfrm_dst *path,
296 struct dst_entry *dst, 298 struct dst_entry *dst,
297 int nfheader_len); 299 int nfheader_len);
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index c75c0d1a85e2..cdca2ab1e711 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -1075,7 +1075,8 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
1075const char *snd_pcm_format_name(snd_pcm_format_t format); 1075const char *snd_pcm_format_name(snd_pcm_format_t format);
1076 1076
1077/** 1077/**
1078 * Get a string naming the direction of a stream 1078 * snd_pcm_stream_str - Get a string naming the direction of a stream
1079 * @substream: the pcm substream instance
1079 */ 1080 */
1080static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream) 1081static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream)
1081{ 1082{
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 128ce46fa48a..015cea01ae39 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -503,8 +503,6 @@ struct se_cmd {
503 u32 se_ordered_id; 503 u32 se_ordered_id;
504 /* Total size in bytes associated with command */ 504 /* Total size in bytes associated with command */
505 u32 data_length; 505 u32 data_length;
506 /* SCSI Presented Data Transfer Length */
507 u32 cmd_spdtl;
508 u32 residual_count; 506 u32 residual_count;
509 u32 orig_fe_lun; 507 u32 orig_fe_lun;
510 /* Persistent Reservation key */ 508 /* Persistent Reservation key */
diff --git a/include/xen/events.h b/include/xen/events.h
index 9c641deb65d2..04399b28e821 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -58,8 +58,6 @@ void notify_remote_via_irq(int irq);
58 58
59void xen_irq_resume(void); 59void xen_irq_resume(void);
60 60
61void xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn);
62
63/* Clear an irq's pending state, in preparation for polling on it */ 61/* Clear an irq's pending state, in preparation for polling on it */
64void xen_clear_irq_pending(int irq); 62void xen_clear_irq_pending(int irq);
65void xen_set_irq_pending(int irq); 63void xen_set_irq_pending(int irq);
diff --git a/init/main.c b/init/main.c
index e60679de61c3..b28673087ac0 100644
--- a/init/main.c
+++ b/init/main.c
@@ -461,10 +461,6 @@ static void __init mm_init(void)
461 percpu_init_late(); 461 percpu_init_late();
462 pgtable_cache_init(); 462 pgtable_cache_init();
463 vmalloc_init(); 463 vmalloc_init();
464#ifdef CONFIG_X86
465 if (efi_enabled)
466 efi_enter_virtual_mode();
467#endif
468} 464}
469 465
470asmlinkage void __init start_kernel(void) 466asmlinkage void __init start_kernel(void)
@@ -606,6 +602,10 @@ asmlinkage void __init start_kernel(void)
606 calibrate_delay(); 602 calibrate_delay();
607 pidmap_init(); 603 pidmap_init();
608 anon_vma_init(); 604 anon_vma_init();
605#ifdef CONFIG_X86
606 if (efi_enabled)
607 efi_enter_virtual_mode();
608#endif
609 thread_info_cache_init(); 609 thread_info_cache_init();
610 cred_init(); 610 cred_init();
611 fork_init(totalram_pages); 611 fork_init(totalram_pages);
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index f8e54f5b9080..9a08acc9e649 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -726,7 +726,6 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
726 struct mq_attr *attr) 726 struct mq_attr *attr)
727{ 727{
728 const struct cred *cred = current_cred(); 728 const struct cred *cred = current_cred();
729 struct file *result;
730 int ret; 729 int ret;
731 730
732 if (attr) { 731 if (attr) {
@@ -748,21 +747,11 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
748 } 747 }
749 748
750 mode &= ~current_umask(); 749 mode &= ~current_umask();
751 ret = mnt_want_write(path->mnt);
752 if (ret)
753 return ERR_PTR(ret);
754 ret = vfs_create(dir, path->dentry, mode, true); 750 ret = vfs_create(dir, path->dentry, mode, true);
755 path->dentry->d_fsdata = NULL; 751 path->dentry->d_fsdata = NULL;
756 if (!ret) 752 if (ret)
757 result = dentry_open(path, oflag, cred); 753 return ERR_PTR(ret);
758 else 754 return dentry_open(path, oflag, cred);
759 result = ERR_PTR(ret);
760 /*
761 * dentry_open() took a persistent mnt_want_write(),
762 * so we can now drop this one.
763 */
764 mnt_drop_write(path->mnt);
765 return result;
766} 755}
767 756
768/* Opens existing queue */ 757/* Opens existing queue */
@@ -788,7 +777,9 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
788 struct mq_attr attr; 777 struct mq_attr attr;
789 int fd, error; 778 int fd, error;
790 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; 779 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
791 struct dentry *root = ipc_ns->mq_mnt->mnt_root; 780 struct vfsmount *mnt = ipc_ns->mq_mnt;
781 struct dentry *root = mnt->mnt_root;
782 int ro;
792 783
793 if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) 784 if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
794 return -EFAULT; 785 return -EFAULT;
@@ -802,6 +793,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
802 if (fd < 0) 793 if (fd < 0)
803 goto out_putname; 794 goto out_putname;
804 795
796 ro = mnt_want_write(mnt); /* we'll drop it in any case */
805 error = 0; 797 error = 0;
806 mutex_lock(&root->d_inode->i_mutex); 798 mutex_lock(&root->d_inode->i_mutex);
807 path.dentry = lookup_one_len(name, root, strlen(name)); 799 path.dentry = lookup_one_len(name, root, strlen(name));
@@ -809,7 +801,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
809 error = PTR_ERR(path.dentry); 801 error = PTR_ERR(path.dentry);
810 goto out_putfd; 802 goto out_putfd;
811 } 803 }
812 path.mnt = mntget(ipc_ns->mq_mnt); 804 path.mnt = mntget(mnt);
813 805
814 if (oflag & O_CREAT) { 806 if (oflag & O_CREAT) {
815 if (path.dentry->d_inode) { /* entry already exists */ 807 if (path.dentry->d_inode) { /* entry already exists */
@@ -820,6 +812,10 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
820 } 812 }
821 filp = do_open(&path, oflag); 813 filp = do_open(&path, oflag);
822 } else { 814 } else {
815 if (ro) {
816 error = ro;
817 goto out;
818 }
823 filp = do_create(ipc_ns, root->d_inode, 819 filp = do_create(ipc_ns, root->d_inode,
824 &path, oflag, mode, 820 &path, oflag, mode,
825 u_attr ? &attr : NULL); 821 u_attr ? &attr : NULL);
@@ -845,6 +841,7 @@ out_putfd:
845 fd = error; 841 fd = error;
846 } 842 }
847 mutex_unlock(&root->d_inode->i_mutex); 843 mutex_unlock(&root->d_inode->i_mutex);
844 mnt_drop_write(mnt);
848out_putname: 845out_putname:
849 putname(name); 846 putname(name);
850 return fd; 847 return fd;
@@ -857,40 +854,38 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
857 struct dentry *dentry; 854 struct dentry *dentry;
858 struct inode *inode = NULL; 855 struct inode *inode = NULL;
859 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; 856 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
857 struct vfsmount *mnt = ipc_ns->mq_mnt;
860 858
861 name = getname(u_name); 859 name = getname(u_name);
862 if (IS_ERR(name)) 860 if (IS_ERR(name))
863 return PTR_ERR(name); 861 return PTR_ERR(name);
864 862
865 mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex, 863 err = mnt_want_write(mnt);
866 I_MUTEX_PARENT); 864 if (err)
867 dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name)); 865 goto out_name;
866 mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT);
867 dentry = lookup_one_len(name, mnt->mnt_root, strlen(name));
868 if (IS_ERR(dentry)) { 868 if (IS_ERR(dentry)) {
869 err = PTR_ERR(dentry); 869 err = PTR_ERR(dentry);
870 goto out_unlock; 870 goto out_unlock;
871 } 871 }
872 872
873 if (!dentry->d_inode) {
874 err = -ENOENT;
875 goto out_err;
876 }
877
878 inode = dentry->d_inode; 873 inode = dentry->d_inode;
879 if (inode) 874 if (!inode) {
875 err = -ENOENT;
876 } else {
880 ihold(inode); 877 ihold(inode);
881 err = mnt_want_write(ipc_ns->mq_mnt); 878 err = vfs_unlink(dentry->d_parent->d_inode, dentry);
882 if (err) 879 }
883 goto out_err;
884 err = vfs_unlink(dentry->d_parent->d_inode, dentry);
885 mnt_drop_write(ipc_ns->mq_mnt);
886out_err:
887 dput(dentry); 880 dput(dentry);
888 881
889out_unlock: 882out_unlock:
890 mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex); 883 mutex_unlock(&mnt->mnt_root->d_inode->i_mutex);
891 putname(name);
892 if (inode) 884 if (inode)
893 iput(inode); 885 iput(inode);
886 mnt_drop_write(mnt);
887out_name:
888 putname(name);
894 889
895 return err; 890 return err;
896} 891}
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 3a5ca582ba1e..ed206fd88cca 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -250,7 +250,6 @@ static void untag_chunk(struct node *p)
250 spin_unlock(&hash_lock); 250 spin_unlock(&hash_lock);
251 spin_unlock(&entry->lock); 251 spin_unlock(&entry->lock);
252 fsnotify_destroy_mark(entry); 252 fsnotify_destroy_mark(entry);
253 fsnotify_put_mark(entry);
254 goto out; 253 goto out;
255 } 254 }
256 255
@@ -259,7 +258,7 @@ static void untag_chunk(struct node *p)
259 258
260 fsnotify_duplicate_mark(&new->mark, entry); 259 fsnotify_duplicate_mark(&new->mark, entry);
261 if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) { 260 if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
262 free_chunk(new); 261 fsnotify_put_mark(&new->mark);
263 goto Fallback; 262 goto Fallback;
264 } 263 }
265 264
@@ -293,7 +292,7 @@ static void untag_chunk(struct node *p)
293 spin_unlock(&hash_lock); 292 spin_unlock(&hash_lock);
294 spin_unlock(&entry->lock); 293 spin_unlock(&entry->lock);
295 fsnotify_destroy_mark(entry); 294 fsnotify_destroy_mark(entry);
296 fsnotify_put_mark(entry); 295 fsnotify_put_mark(&new->mark); /* drop initial reference */
297 goto out; 296 goto out;
298 297
299Fallback: 298Fallback:
@@ -322,7 +321,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
322 321
323 entry = &chunk->mark; 322 entry = &chunk->mark;
324 if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) { 323 if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
325 free_chunk(chunk); 324 fsnotify_put_mark(entry);
326 return -ENOSPC; 325 return -ENOSPC;
327 } 326 }
328 327
@@ -347,6 +346,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
347 insert_hash(chunk); 346 insert_hash(chunk);
348 spin_unlock(&hash_lock); 347 spin_unlock(&hash_lock);
349 spin_unlock(&entry->lock); 348 spin_unlock(&entry->lock);
349 fsnotify_put_mark(entry); /* drop initial reference */
350 return 0; 350 return 0;
351} 351}
352 352
@@ -396,7 +396,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
396 fsnotify_duplicate_mark(chunk_entry, old_entry); 396 fsnotify_duplicate_mark(chunk_entry, old_entry);
397 if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) { 397 if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
398 spin_unlock(&old_entry->lock); 398 spin_unlock(&old_entry->lock);
399 free_chunk(chunk); 399 fsnotify_put_mark(chunk_entry);
400 fsnotify_put_mark(old_entry); 400 fsnotify_put_mark(old_entry);
401 return -ENOSPC; 401 return -ENOSPC;
402 } 402 }
@@ -444,8 +444,8 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
444 spin_unlock(&chunk_entry->lock); 444 spin_unlock(&chunk_entry->lock);
445 spin_unlock(&old_entry->lock); 445 spin_unlock(&old_entry->lock);
446 fsnotify_destroy_mark(old_entry); 446 fsnotify_destroy_mark(old_entry);
447 fsnotify_put_mark(chunk_entry); /* drop initial reference */
447 fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */ 448 fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
448 fsnotify_put_mark(old_entry); /* and kill it */
449 return 0; 449 return 0;
450} 450}
451 451
@@ -916,7 +916,12 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
916 struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark); 916 struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
917 917
918 evict_chunk(chunk); 918 evict_chunk(chunk);
919 fsnotify_put_mark(entry); 919
920 /*
921 * We are guaranteed to have at least one reference to the mark from
922 * either the inode or the caller of fsnotify_destroy_mark().
923 */
924 BUG_ON(atomic_read(&entry->refcnt) < 1);
920} 925}
921 926
922static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, 927static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
diff --git a/kernel/fork.c b/kernel/fork.c
index 3bd2280d79f6..2c8857e12855 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -455,8 +455,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
455 if (retval) 455 if (retval)
456 goto out; 456 goto out;
457 457
458 if (file && uprobe_mmap(tmp)) 458 if (file)
459 goto out; 459 uprobe_mmap(tmp);
460 } 460 }
461 /* a new mm has just been created */ 461 /* a new mm has just been created */
462 arch_dup_mmap(oldmm, mm); 462 arch_dup_mmap(oldmm, mm);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 82ad284f823b..fbf1fd098dc6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3142,6 +3142,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3142# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) 3142# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
3143#endif 3143#endif
3144 3144
3145static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
3146{
3147 u64 temp = (__force u64) rtime;
3148
3149 temp *= (__force u64) utime;
3150
3151 if (sizeof(cputime_t) == 4)
3152 temp = div_u64(temp, (__force u32) total);
3153 else
3154 temp = div64_u64(temp, (__force u64) total);
3155
3156 return (__force cputime_t) temp;
3157}
3158
3145void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) 3159void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3146{ 3160{
3147 cputime_t rtime, utime = p->utime, total = utime + p->stime; 3161 cputime_t rtime, utime = p->utime, total = utime + p->stime;
@@ -3151,13 +3165,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3151 */ 3165 */
3152 rtime = nsecs_to_cputime(p->se.sum_exec_runtime); 3166 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
3153 3167
3154 if (total) { 3168 if (total)
3155 u64 temp = (__force u64) rtime; 3169 utime = scale_utime(utime, rtime, total);
3156 3170 else
3157 temp *= (__force u64) utime;
3158 do_div(temp, (__force u32) total);
3159 utime = (__force cputime_t) temp;
3160 } else
3161 utime = rtime; 3171 utime = rtime;
3162 3172
3163 /* 3173 /*
@@ -3184,13 +3194,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3184 total = cputime.utime + cputime.stime; 3194 total = cputime.utime + cputime.stime;
3185 rtime = nsecs_to_cputime(cputime.sum_exec_runtime); 3195 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
3186 3196
3187 if (total) { 3197 if (total)
3188 u64 temp = (__force u64) rtime; 3198 utime = scale_utime(cputime.utime, rtime, total);
3189 3199 else
3190 temp *= (__force u64) cputime.utime;
3191 do_div(temp, (__force u32) total);
3192 utime = (__force cputime_t) temp;
3193 } else
3194 utime = rtime; 3200 utime = rtime;
3195 3201
3196 sig->prev_utime = max(sig->prev_utime, utime); 3202 sig->prev_utime = max(sig->prev_utime, utime);
@@ -7246,6 +7252,7 @@ int in_sched_functions(unsigned long addr)
7246 7252
7247#ifdef CONFIG_CGROUP_SCHED 7253#ifdef CONFIG_CGROUP_SCHED
7248struct task_group root_task_group; 7254struct task_group root_task_group;
7255LIST_HEAD(task_groups);
7249#endif 7256#endif
7250 7257
7251DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask); 7258DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
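
The scale_utime() helper factored out above fixes how a task's utime is scaled by the ratio rtime/total: the old inline code used do_div(), which truncates the divisor to 32 bits, so once utime + stime exceeded roughly four billion cputime units the result was nonsense. The new helper picks div_u64() or div64_u64() depending on sizeof(cputime_t). A user-space sketch of the arithmetic with illustrative values; the kernel version keeps the __force casts and division helpers.

#include <inttypes.h>
#include <stdio.h>

static uint64_t scale_utime(uint64_t utime, uint64_t rtime, uint64_t total)
{
	return rtime * utime / total;	/* full 64-by-64 division */
}

int main(void)
{
	uint64_t utime = 3ULL << 30;	/* user time   */
	uint64_t stime = 1ULL << 30;	/* system time */
	uint64_t rtime = 1ULL << 32;	/* precise runtime; total no longer fits in u32 */

	/* Prints 3221225472, i.e. 3/4 of rtime, as expected. */
	printf("scaled utime = %" PRIu64 "\n",
	       scale_utime(utime, rtime, utime + stime));
	return 0;
}
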
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d0cc03b3e70b..c219bf8d704c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3387,6 +3387,14 @@ static int tg_load_down(struct task_group *tg, void *data)
3387 3387
3388static void update_h_load(long cpu) 3388static void update_h_load(long cpu)
3389{ 3389{
3390 struct rq *rq = cpu_rq(cpu);
3391 unsigned long now = jiffies;
3392
3393 if (rq->h_load_throttle == now)
3394 return;
3395
3396 rq->h_load_throttle = now;
3397
3390 rcu_read_lock(); 3398 rcu_read_lock();
3391 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); 3399 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
3392 rcu_read_unlock(); 3400 rcu_read_unlock();
@@ -4293,11 +4301,10 @@ redo:
4293 env.src_rq = busiest; 4301 env.src_rq = busiest;
4294 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); 4302 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
4295 4303
4304 update_h_load(env.src_cpu);
4296more_balance: 4305more_balance:
4297 local_irq_save(flags); 4306 local_irq_save(flags);
4298 double_rq_lock(this_rq, busiest); 4307 double_rq_lock(this_rq, busiest);
4299 if (!env.loop)
4300 update_h_load(env.src_cpu);
4301 4308
4302 /* 4309 /*
4303 * cur_ld_moved - load moved in current iteration 4310 * cur_ld_moved - load moved in current iteration
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 573e1ca01102..944cb68420e9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -788,6 +788,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
788 const struct cpumask *span; 788 const struct cpumask *span;
789 789
790 span = sched_rt_period_mask(); 790 span = sched_rt_period_mask();
791#ifdef CONFIG_RT_GROUP_SCHED
792 /*
793 * FIXME: isolated CPUs should really leave the root task group,
794 * whether they are isolcpus or were isolated via cpusets, lest
795 * the timer run on a CPU which does not service all runqueues,
796 * potentially leaving other CPUs indefinitely throttled. If
797 * isolation is really required, the user will turn the throttle
798 * off to kill the perturbations it causes anyway. Meanwhile,
799 * this maintains functionality for boot and/or troubleshooting.
800 */
801 if (rt_b == &root_task_group.rt_bandwidth)
802 span = cpu_online_mask;
803#endif
791 for_each_cpu(i, span) { 804 for_each_cpu(i, span) {
792 int enqueue = 0; 805 int enqueue = 0;
793 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); 806 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c35a1a7dd4d6..f6714d009e77 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -80,7 +80,7 @@ extern struct mutex sched_domains_mutex;
80struct cfs_rq; 80struct cfs_rq;
81struct rt_rq; 81struct rt_rq;
82 82
83static LIST_HEAD(task_groups); 83extern struct list_head task_groups;
84 84
85struct cfs_bandwidth { 85struct cfs_bandwidth {
86#ifdef CONFIG_CFS_BANDWIDTH 86#ifdef CONFIG_CFS_BANDWIDTH
@@ -374,7 +374,11 @@ struct rq {
374#ifdef CONFIG_FAIR_GROUP_SCHED 374#ifdef CONFIG_FAIR_GROUP_SCHED
375 /* list of leaf cfs_rq on this cpu: */ 375 /* list of leaf cfs_rq on this cpu: */
376 struct list_head leaf_cfs_rq_list; 376 struct list_head leaf_cfs_rq_list;
377#endif 377#ifdef CONFIG_SMP
378 unsigned long h_load_throttle;
379#endif /* CONFIG_SMP */
380#endif /* CONFIG_FAIR_GROUP_SCHED */
381
378#ifdef CONFIG_RT_GROUP_SCHED 382#ifdef CONFIG_RT_GROUP_SCHED
379 struct list_head leaf_rt_rq_list; 383 struct list_head leaf_rt_rq_list;
380#endif 384#endif
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 7b386e86fd23..da5eb5bed84a 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -27,8 +27,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
27{ 27{
28 struct task_struct *stop = rq->stop; 28 struct task_struct *stop = rq->stop;
29 29
30 if (stop && stop->on_rq) 30 if (stop && stop->on_rq) {
31 stop->se.exec_start = rq->clock_task;
31 return stop; 32 return stop;
33 }
32 34
33 return NULL; 35 return NULL;
34} 36}
@@ -52,6 +54,21 @@ static void yield_task_stop(struct rq *rq)
52 54
53static void put_prev_task_stop(struct rq *rq, struct task_struct *prev) 55static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
54{ 56{
57 struct task_struct *curr = rq->curr;
58 u64 delta_exec;
59
60 delta_exec = rq->clock_task - curr->se.exec_start;
61 if (unlikely((s64)delta_exec < 0))
62 delta_exec = 0;
63
64 schedstat_set(curr->se.statistics.exec_max,
65 max(curr->se.statistics.exec_max, delta_exec));
66
67 curr->se.sum_exec_runtime += delta_exec;
68 account_group_exec_runtime(curr, delta_exec);
69
70 curr->se.exec_start = rq->clock_task;
71 cpuacct_charge(curr, delta_exec);
55} 72}
56 73
57static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) 74static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
@@ -60,6 +77,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
60 77
61static void set_curr_task_stop(struct rq *rq) 78static void set_curr_task_stop(struct rq *rq)
62{ 79{
80 struct task_struct *stop = rq->stop;
81
82 stop->se.exec_start = rq->clock_task;
63} 83}
64 84
65static void switched_to_stop(struct rq *rq, struct task_struct *p) 85static void switched_to_stop(struct rq *rq, struct task_struct *p)
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 91d4e1742a0c..d320d44903bd 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -75,6 +75,7 @@ void task_work_run(void)
75 p = q->next; 75 p = q->next;
76 q->func(q); 76 q->func(q);
77 q = p; 77 q = p;
78 cond_resched();
78 } 79 }
79 } 80 }
80} 81}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e16af197a2bc..34e5eac81424 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -115,6 +115,7 @@ static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
115{ 115{
116 tk->xtime_sec += ts->tv_sec; 116 tk->xtime_sec += ts->tv_sec;
117 tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift; 117 tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
118 tk_normalize_xtime(tk);
118} 119}
119 120
120static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm) 121static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
@@ -276,7 +277,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)
276 tk->xtime_nsec += cycle_delta * tk->mult; 277 tk->xtime_nsec += cycle_delta * tk->mult;
277 278
278 /* If arch requires, add in gettimeoffset() */ 279 /* If arch requires, add in gettimeoffset() */
279 tk->xtime_nsec += arch_gettimeoffset() << tk->shift; 280 tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;
280 281
281 tk_normalize_xtime(tk); 282 tk_normalize_xtime(tk);
282 283
@@ -427,7 +428,7 @@ int do_settimeofday(const struct timespec *tv)
427 struct timespec ts_delta, xt; 428 struct timespec ts_delta, xt;
428 unsigned long flags; 429 unsigned long flags;
429 430
430 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) 431 if (!timespec_valid_strict(tv))
431 return -EINVAL; 432 return -EINVAL;
432 433
433 write_seqlock_irqsave(&tk->lock, flags); 434 write_seqlock_irqsave(&tk->lock, flags);
@@ -463,6 +464,8 @@ int timekeeping_inject_offset(struct timespec *ts)
463{ 464{
464 struct timekeeper *tk = &timekeeper; 465 struct timekeeper *tk = &timekeeper;
465 unsigned long flags; 466 unsigned long flags;
467 struct timespec tmp;
468 int ret = 0;
466 469
467 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) 470 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
468 return -EINVAL; 471 return -EINVAL;
@@ -471,10 +474,17 @@ int timekeeping_inject_offset(struct timespec *ts)
471 474
472 timekeeping_forward_now(tk); 475 timekeeping_forward_now(tk);
473 476
477 /* Make sure the proposed value is valid */
478 tmp = timespec_add(tk_xtime(tk), *ts);
479 if (!timespec_valid_strict(&tmp)) {
480 ret = -EINVAL;
481 goto error;
482 }
474 483
475 tk_xtime_add(tk, ts); 484 tk_xtime_add(tk, ts);
476 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); 485 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
477 486
487error: /* even if we error out, we forwarded the time, so call update */
478 timekeeping_update(tk, true); 488 timekeeping_update(tk, true);
479 489
480 write_sequnlock_irqrestore(&tk->lock, flags); 490 write_sequnlock_irqrestore(&tk->lock, flags);
@@ -482,7 +492,7 @@ int timekeeping_inject_offset(struct timespec *ts)
482 /* signal hrtimers about time change */ 492 /* signal hrtimers about time change */
483 clock_was_set(); 493 clock_was_set();
484 494
485 return 0; 495 return ret;
486} 496}
487EXPORT_SYMBOL(timekeeping_inject_offset); 497EXPORT_SYMBOL(timekeeping_inject_offset);
488 498
@@ -649,7 +659,20 @@ void __init timekeeping_init(void)
649 struct timespec now, boot, tmp; 659 struct timespec now, boot, tmp;
650 660
651 read_persistent_clock(&now); 661 read_persistent_clock(&now);
662 if (!timespec_valid_strict(&now)) {
663 pr_warn("WARNING: Persistent clock returned invalid value!\n"
664 " Check your CMOS/BIOS settings.\n");
665 now.tv_sec = 0;
666 now.tv_nsec = 0;
667 }
668
652 read_boot_clock(&boot); 669 read_boot_clock(&boot);
670 if (!timespec_valid_strict(&boot)) {
671 pr_warn("WARNING: Boot clock returned invalid value!\n"
672 " Check your CMOS/BIOS settings.\n");
673 boot.tv_sec = 0;
674 boot.tv_nsec = 0;
675 }
653 676
654 seqlock_init(&tk->lock); 677 seqlock_init(&tk->lock);
655 678
@@ -690,7 +713,7 @@ static struct timespec timekeeping_suspend_time;
690static void __timekeeping_inject_sleeptime(struct timekeeper *tk, 713static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
691 struct timespec *delta) 714 struct timespec *delta)
692{ 715{
693 if (!timespec_valid(delta)) { 716 if (!timespec_valid_strict(delta)) {
694 printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid " 717 printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
695 "sleep delta value!\n"); 718 "sleep delta value!\n");
696 return; 719 return;
@@ -1129,6 +1152,10 @@ static void update_wall_time(void)
1129 offset = (clock->read(clock) - clock->cycle_last) & clock->mask; 1152 offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
1130#endif 1153#endif
1131 1154
1155 /* Check if there's really nothing to do */
1156 if (offset < tk->cycle_interval)
1157 goto out;
1158
1132 /* 1159 /*
1133 * With NO_HZ we may have to accumulate many cycle_intervals 1160 * With NO_HZ we may have to accumulate many cycle_intervals
1134 * (think "ticks") worth of time at once. To do this efficiently, 1161 * (think "ticks") worth of time at once. To do this efficiently,
@@ -1161,9 +1188,9 @@ static void update_wall_time(void)
1161 * the vsyscall implementations are converted to use xtime_nsec 1188 * the vsyscall implementations are converted to use xtime_nsec
1162 * (shifted nanoseconds), this can be killed. 1189 * (shifted nanoseconds), this can be killed.
1163 */ 1190 */
1164 remainder = tk->xtime_nsec & ((1 << tk->shift) - 1); 1191 remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
1165 tk->xtime_nsec -= remainder; 1192 tk->xtime_nsec -= remainder;
1166 tk->xtime_nsec += 1 << tk->shift; 1193 tk->xtime_nsec += 1ULL << tk->shift;
1167 tk->ntp_error += remainder << tk->ntp_error_shift; 1194 tk->ntp_error += remainder << tk->ntp_error_shift;
1168 1195
1169 /* 1196 /*
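
Two themes run through the timekeeping.c hunks above: inputs are now validated with timespec_valid_strict() (settimeofday, offset injection, and the persistent/boot clocks at init), and several nanosecond computations are widened to 64 bits before shifting. The cast added in timekeeping_forward_now() is the clearest example; without it, a 32-bit arch_gettimeoffset() value shifted by the clocksource shift silently drops its high bits, as the small user-space demo below shows (values are illustrative):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	unsigned int offset_ns = 5000000;	/* 5 ms, as a 32-bit quantity */
	unsigned int shift = 10;		/* illustrative clocksource shift */

	unsigned int truncated = offset_ns << shift;		/* wraps at 2^32 */
	uint64_t widened = (uint64_t)offset_ns << shift;	/* widen first */

	/* Prints "truncated=825032704 widened=5120000000". */
	printf("truncated=%u widened=%" PRIu64 "\n", truncated, widened);
	return 0;
}
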
diff --git a/kernel/timer.c b/kernel/timer.c
index a61c09374eba..8c5e7b908c68 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1407,13 +1407,6 @@ SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1407 1407
1408#endif 1408#endif
1409 1409
1410#ifndef __alpha__
1411
1412/*
1413 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
1414 * should be moved into arch/i386 instead?
1415 */
1416
1417/** 1410/**
1418 * sys_getpid - return the thread group id of the current process 1411 * sys_getpid - return the thread group id of the current process
1419 * 1412 *
@@ -1469,8 +1462,6 @@ SYSCALL_DEFINE0(getegid)
1469 return from_kgid_munged(current_user_ns(), current_egid()); 1462 return from_kgid_munged(current_user_ns(), current_egid());
1470} 1463}
1471 1464
1472#endif
1473
1474static void process_timeout(unsigned long __data) 1465static void process_timeout(unsigned long __data)
1475{ 1466{
1476 wake_up_process((struct task_struct *)__data); 1467 wake_up_process((struct task_struct *)__data);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 60e4d7875672..6b245f64c8dd 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -506,6 +506,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
506 int size; 506 int size;
507 507
508 syscall_nr = syscall_get_nr(current, regs); 508 syscall_nr = syscall_get_nr(current, regs);
509 if (syscall_nr < 0)
510 return;
509 if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) 511 if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
510 return; 512 return;
511 513
@@ -580,6 +582,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
580 int size; 582 int size;
581 583
582 syscall_nr = syscall_get_nr(current, regs); 584 syscall_nr = syscall_get_nr(current, regs);
585 if (syscall_nr < 0)
586 return;
583 if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) 587 if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
584 return; 588 return;
585 589
diff --git a/mm/compaction.c b/mm/compaction.c
index e78cb9688421..7fcd3a52e68d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -51,6 +51,47 @@ static inline bool migrate_async_suitable(int migratetype)
51} 51}
52 52
53/* 53/*
54 * Compaction requires the taking of some coarse locks that are potentially
55 * very heavily contended. Check if the process needs to be scheduled or
56 * if the lock is contended. For async compaction, back out in the event
57 * if contention is severe. For sync compaction, schedule.
58 *
59 * Returns true if the lock is held.
60 * Returns false if the lock is released and compaction should abort
61 */
62static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
63 bool locked, struct compact_control *cc)
64{
65 if (need_resched() || spin_is_contended(lock)) {
66 if (locked) {
67 spin_unlock_irqrestore(lock, *flags);
68 locked = false;
69 }
70
71 /* async aborts if taking too long or contended */
72 if (!cc->sync) {
73 if (cc->contended)
74 *cc->contended = true;
75 return false;
76 }
77
78 cond_resched();
79 if (fatal_signal_pending(current))
80 return false;
81 }
82
83 if (!locked)
84 spin_lock_irqsave(lock, *flags);
85 return true;
86}
87
88static inline bool compact_trylock_irqsave(spinlock_t *lock,
89 unsigned long *flags, struct compact_control *cc)
90{
91 return compact_checklock_irqsave(lock, flags, false, cc);
92}
93
94/*
54 * Isolate free pages onto a private freelist. Caller must hold zone->lock. 95 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
55 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free 96 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
56 * pages inside of the pageblock (even though it may still end up isolating 97 * pages inside of the pageblock (even though it may still end up isolating
@@ -173,7 +214,7 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
173} 214}
174 215
175/* Update the number of anon and file isolated pages in the zone */ 216/* Update the number of anon and file isolated pages in the zone */
176static void acct_isolated(struct zone *zone, struct compact_control *cc) 217static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
177{ 218{
178 struct page *page; 219 struct page *page;
179 unsigned int count[2] = { 0, }; 220 unsigned int count[2] = { 0, };
@@ -181,8 +222,14 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
181 list_for_each_entry(page, &cc->migratepages, lru) 222 list_for_each_entry(page, &cc->migratepages, lru)
182 count[!!page_is_file_cache(page)]++; 223 count[!!page_is_file_cache(page)]++;
183 224
184 __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); 225 /* If locked we can use the interrupt unsafe versions */
185 __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); 226 if (locked) {
227 __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
228 __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
229 } else {
230 mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
231 mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
232 }
186} 233}
187 234
188/* Similar to reclaim, but different enough that they don't share logic */ 235/* Similar to reclaim, but different enough that they don't share logic */
@@ -228,6 +275,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
228 struct list_head *migratelist = &cc->migratepages; 275 struct list_head *migratelist = &cc->migratepages;
229 isolate_mode_t mode = 0; 276 isolate_mode_t mode = 0;
230 struct lruvec *lruvec; 277 struct lruvec *lruvec;
278 unsigned long flags;
279 bool locked;
231 280
232 /* 281 /*
233 * Ensure that there are not too many pages isolated from the LRU 282 * Ensure that there are not too many pages isolated from the LRU
@@ -247,25 +296,22 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
247 296
248 /* Time to isolate some pages for migration */ 297 /* Time to isolate some pages for migration */
249 cond_resched(); 298 cond_resched();
250 spin_lock_irq(&zone->lru_lock); 299 spin_lock_irqsave(&zone->lru_lock, flags);
300 locked = true;
251 for (; low_pfn < end_pfn; low_pfn++) { 301 for (; low_pfn < end_pfn; low_pfn++) {
252 struct page *page; 302 struct page *page;
253 bool locked = true;
254 303
255 /* give a chance to irqs before checking need_resched() */ 304 /* give a chance to irqs before checking need_resched() */
256 if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) { 305 if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
257 spin_unlock_irq(&zone->lru_lock); 306 spin_unlock_irqrestore(&zone->lru_lock, flags);
258 locked = false; 307 locked = false;
259 } 308 }
260 if (need_resched() || spin_is_contended(&zone->lru_lock)) { 309
261 if (locked) 310 /* Check if it is ok to still hold the lock */
262 spin_unlock_irq(&zone->lru_lock); 311 locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
263 cond_resched(); 312 locked, cc);
264 spin_lock_irq(&zone->lru_lock); 313 if (!locked)
265 if (fatal_signal_pending(current)) 314 break;
266 break;
267 } else if (!locked)
268 spin_lock_irq(&zone->lru_lock);
269 315
270 /* 316 /*
271 * migrate_pfn does not necessarily start aligned to a 317 * migrate_pfn does not necessarily start aligned to a
@@ -349,9 +395,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
349 } 395 }
350 } 396 }
351 397
352 acct_isolated(zone, cc); 398 acct_isolated(zone, locked, cc);
353 399
354 spin_unlock_irq(&zone->lru_lock); 400 if (locked)
401 spin_unlock_irqrestore(&zone->lru_lock, flags);
355 402
356 trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); 403 trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
357 404
@@ -384,6 +431,20 @@ static bool suitable_migration_target(struct page *page)
384} 431}
385 432
386/* 433/*
434 * Returns the start pfn of the last page block in a zone. This is the starting
435 * point for full compaction of a zone. Compaction searches for free pages from
436 * the end of each zone, while isolate_freepages_block scans forward inside each
437 * page block.
438 */
439static unsigned long start_free_pfn(struct zone *zone)
440{
441 unsigned long free_pfn;
442 free_pfn = zone->zone_start_pfn + zone->spanned_pages;
443 free_pfn &= ~(pageblock_nr_pages-1);
444 return free_pfn;
445}
446
447/*
387 * Based on information in the current compact_control, find blocks 448 * Based on information in the current compact_control, find blocks
388 * suitable for isolating free pages from and then isolate them. 449 * suitable for isolating free pages from and then isolate them.
389 */ 450 */
@@ -422,17 +483,6 @@ static void isolate_freepages(struct zone *zone,
422 pfn -= pageblock_nr_pages) { 483 pfn -= pageblock_nr_pages) {
423 unsigned long isolated; 484 unsigned long isolated;
424 485
425 /*
426 * Skip ahead if another thread is compacting in the area
427 * simultaneously. If we wrapped around, we can only skip
428 * ahead if zone->compact_cached_free_pfn also wrapped to
429 * above our starting point.
430 */
431 if (cc->order > 0 && (!cc->wrapped ||
432 zone->compact_cached_free_pfn >
433 cc->start_free_pfn))
434 pfn = min(pfn, zone->compact_cached_free_pfn);
435
436 if (!pfn_valid(pfn)) 486 if (!pfn_valid(pfn))
437 continue; 487 continue;
438 488
@@ -458,7 +508,16 @@ static void isolate_freepages(struct zone *zone,
458 * are disabled 508 * are disabled
459 */ 509 */
460 isolated = 0; 510 isolated = 0;
461 spin_lock_irqsave(&zone->lock, flags); 511
512 /*
513 * The zone lock must be held to isolate freepages.
514 * Unfortunately this is a very coarse lock and can be
515 * heavily contended if there are parallel allocations
516 * or parallel compactions. For async compaction do not
517 * spin on the lock
518 */
519 if (!compact_trylock_irqsave(&zone->lock, &flags, cc))
520 break;
462 if (suitable_migration_target(page)) { 521 if (suitable_migration_target(page)) {
463 end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); 522 end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
464 isolated = isolate_freepages_block(pfn, end_pfn, 523 isolated = isolate_freepages_block(pfn, end_pfn,
@@ -474,7 +533,15 @@ static void isolate_freepages(struct zone *zone,
474 */ 533 */
475 if (isolated) { 534 if (isolated) {
476 high_pfn = max(high_pfn, pfn); 535 high_pfn = max(high_pfn, pfn);
477 if (cc->order > 0) 536
537 /*
538 * If the free scanner has wrapped, update
539 * compact_cached_free_pfn to point to the highest
540 * pageblock with free pages. This reduces excessive
541 * scanning of full pageblocks near the end of the
542 * zone
543 */
544 if (cc->order > 0 && cc->wrapped)
478 zone->compact_cached_free_pfn = high_pfn; 545 zone->compact_cached_free_pfn = high_pfn;
479 } 546 }
480 } 547 }
@@ -484,6 +551,11 @@ static void isolate_freepages(struct zone *zone,
484 551
485 cc->free_pfn = high_pfn; 552 cc->free_pfn = high_pfn;
486 cc->nr_freepages = nr_freepages; 553 cc->nr_freepages = nr_freepages;
554
555 /* If compact_cached_free_pfn is reset then set it now */
556 if (cc->order > 0 && !cc->wrapped &&
557 zone->compact_cached_free_pfn == start_free_pfn(zone))
558 zone->compact_cached_free_pfn = high_pfn;
487} 559}
488 560
489/* 561/*
@@ -570,20 +642,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
570 return ISOLATE_SUCCESS; 642 return ISOLATE_SUCCESS;
571} 643}
572 644
573/*
574 * Returns the start pfn of the last page block in a zone. This is the starting
575 * point for full compaction of a zone. Compaction searches for free pages from
576 * the end of each zone, while isolate_freepages_block scans forward inside each
577 * page block.
578 */
579static unsigned long start_free_pfn(struct zone *zone)
580{
581 unsigned long free_pfn;
582 free_pfn = zone->zone_start_pfn + zone->spanned_pages;
583 free_pfn &= ~(pageblock_nr_pages-1);
584 return free_pfn;
585}
586
587static int compact_finished(struct zone *zone, 645static int compact_finished(struct zone *zone,
588 struct compact_control *cc) 646 struct compact_control *cc)
589{ 647{
@@ -771,7 +829,7 @@ out:
771 829
772static unsigned long compact_zone_order(struct zone *zone, 830static unsigned long compact_zone_order(struct zone *zone,
773 int order, gfp_t gfp_mask, 831 int order, gfp_t gfp_mask,
774 bool sync) 832 bool sync, bool *contended)
775{ 833{
776 struct compact_control cc = { 834 struct compact_control cc = {
777 .nr_freepages = 0, 835 .nr_freepages = 0,
@@ -780,6 +838,7 @@ static unsigned long compact_zone_order(struct zone *zone,
780 .migratetype = allocflags_to_migratetype(gfp_mask), 838 .migratetype = allocflags_to_migratetype(gfp_mask),
781 .zone = zone, 839 .zone = zone,
782 .sync = sync, 840 .sync = sync,
841 .contended = contended,
783 }; 842 };
784 INIT_LIST_HEAD(&cc.freepages); 843 INIT_LIST_HEAD(&cc.freepages);
785 INIT_LIST_HEAD(&cc.migratepages); 844 INIT_LIST_HEAD(&cc.migratepages);
@@ -801,7 +860,7 @@ int sysctl_extfrag_threshold = 500;
801 */ 860 */
802unsigned long try_to_compact_pages(struct zonelist *zonelist, 861unsigned long try_to_compact_pages(struct zonelist *zonelist,
803 int order, gfp_t gfp_mask, nodemask_t *nodemask, 862 int order, gfp_t gfp_mask, nodemask_t *nodemask,
804 bool sync) 863 bool sync, bool *contended)
805{ 864{
806 enum zone_type high_zoneidx = gfp_zone(gfp_mask); 865 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
807 int may_enter_fs = gfp_mask & __GFP_FS; 866 int may_enter_fs = gfp_mask & __GFP_FS;
@@ -825,7 +884,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
825 nodemask) { 884 nodemask) {
826 int status; 885 int status;
827 886
828 status = compact_zone_order(zone, order, gfp_mask, sync); 887 status = compact_zone_order(zone, order, gfp_mask, sync,
888 contended);
829 rc = max(status, rc); 889 rc = max(status, rc);
830 890
831 /* If a normal allocation would succeed, stop compacting */ 891 /* If a normal allocation would succeed, stop compacting */
@@ -861,7 +921,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
861 if (cc->order > 0) { 921 if (cc->order > 0) {
862 int ok = zone_watermark_ok(zone, cc->order, 922 int ok = zone_watermark_ok(zone, cc->order,
863 low_wmark_pages(zone), 0, 0); 923 low_wmark_pages(zone), 0, 0);
864 if (ok && cc->order > zone->compact_order_failed) 924 if (ok && cc->order >= zone->compact_order_failed)
865 zone->compact_order_failed = cc->order + 1; 925 zone->compact_order_failed = cc->order + 1;
866 /* Currently async compaction is never deferred. */ 926 /* Currently async compaction is never deferred. */
867 else if (!ok && cc->sync) 927 else if (!ok && cc->sync)
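The new compact_checklock_irqsave() centralises the decision the old open-coded loop made in isolate_migratepages_range(): an async compactor backs out of a contended lock and reports the contention through cc->contended, while a sync compactor reschedules and waits. A loose user-space analogue of that back-out-versus-block choice, using a pthread mutex as a stand-in for the zone spinlock (names hypothetical, not the kernel helper):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct compact_ctl {
	bool sync;		/* sync callers may block on the lock */
	bool *contended;	/* reported back to the caller, may be NULL */
};

/* Returns true with the lock held, false if the caller should abort. */
static bool checklock(pthread_mutex_t *lock, bool locked, struct compact_ctl *cc)
{
	if (locked)
		return true;			/* already held earlier in the scan */

	if (pthread_mutex_trylock(lock) == 0)
		return true;			/* uncontended fast path */

	if (!cc->sync) {			/* async: back out rather than spin */
		if (cc->contended)
			*cc->contended = true;
		return false;
	}

	pthread_mutex_lock(lock);		/* sync: block until it is free */
	return true;
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	bool contended = false;
	struct compact_ctl cc = { .sync = false, .contended = &contended };

	if (checklock(&lock, false, &cc))
		pthread_mutex_unlock(&lock);
	return contended;
}

The same caller-visible contract as the kernel version holds: when false is returned the lock is not held and the async scan stops where it is.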
diff --git a/mm/filemap.c b/mm/filemap.c
index fa5ca304148e..384344575c37 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1412,12 +1412,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1412 retval = filemap_write_and_wait_range(mapping, pos, 1412 retval = filemap_write_and_wait_range(mapping, pos,
1413 pos + iov_length(iov, nr_segs) - 1); 1413 pos + iov_length(iov, nr_segs) - 1);
1414 if (!retval) { 1414 if (!retval) {
1415 struct blk_plug plug;
1416
1417 blk_start_plug(&plug);
1418 retval = mapping->a_ops->direct_IO(READ, iocb, 1415 retval = mapping->a_ops->direct_IO(READ, iocb,
1419 iov, pos, nr_segs); 1416 iov, pos, nr_segs);
1420 blk_finish_plug(&plug);
1421 } 1417 }
1422 if (retval > 0) { 1418 if (retval > 0) {
1423 *ppos = pos + retval; 1419 *ppos = pos + retval;
@@ -2527,14 +2523,12 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2527{ 2523{
2528 struct file *file = iocb->ki_filp; 2524 struct file *file = iocb->ki_filp;
2529 struct inode *inode = file->f_mapping->host; 2525 struct inode *inode = file->f_mapping->host;
2530 struct blk_plug plug;
2531 ssize_t ret; 2526 ssize_t ret;
2532 2527
2533 BUG_ON(iocb->ki_pos != pos); 2528 BUG_ON(iocb->ki_pos != pos);
2534 2529
2535 sb_start_write(inode->i_sb); 2530 sb_start_write(inode->i_sb);
2536 mutex_lock(&inode->i_mutex); 2531 mutex_lock(&inode->i_mutex);
2537 blk_start_plug(&plug);
2538 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); 2532 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
2539 mutex_unlock(&inode->i_mutex); 2533 mutex_unlock(&inode->i_mutex);
2540 2534
@@ -2545,7 +2539,6 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2545 if (err < 0 && ret > 0) 2539 if (err < 0 && ret > 0)
2546 ret = err; 2540 ret = err;
2547 } 2541 }
2548 blk_finish_plug(&plug);
2549 sb_end_write(inode->i_sb); 2542 sb_end_write(inode->i_sb);
2550 return ret; 2543 return ret;
2551} 2544}
diff --git a/mm/internal.h b/mm/internal.h
index 3314f79d775a..b8c91b342e24 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -130,6 +130,7 @@ struct compact_control {
130 int order; /* order a direct compactor needs */ 130 int order; /* order a direct compactor needs */
131 int migratetype; /* MOVABLE, RECLAIMABLE etc */ 131 int migratetype; /* MOVABLE, RECLAIMABLE etc */
132 struct zone *zone; 132 struct zone *zone;
133 bool *contended; /* True if a lock was contended */
133}; 134};
134 135
135unsigned long 136unsigned long
diff --git a/mm/mmap.c b/mm/mmap.c
index e3e86914f11a..ae18a48e7e4e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1356,9 +1356,8 @@ out:
1356 } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK)) 1356 } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
1357 make_pages_present(addr, addr + len); 1357 make_pages_present(addr, addr + len);
1358 1358
1359 if (file && uprobe_mmap(vma)) 1359 if (file)
1360 /* matching probes but cannot insert */ 1360 uprobe_mmap(vma);
1361 goto unmap_and_free_vma;
1362 1361
1363 return addr; 1362 return addr;
1364 1363
@@ -2309,7 +2308,7 @@ void exit_mmap(struct mm_struct *mm)
2309 } 2308 }
2310 vm_unacct_memory(nr_accounted); 2309 vm_unacct_memory(nr_accounted);
2311 2310
2312 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); 2311 WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
2313} 2312}
2314 2313
2315/* Insert vm structure into process list sorted by address 2314/* Insert vm structure into process list sorted by address
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 009ac285fea7..c66fb875104a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1928,6 +1928,17 @@ this_zone_full:
1928 zlc_active = 0; 1928 zlc_active = 0;
1929 goto zonelist_scan; 1929 goto zonelist_scan;
1930 } 1930 }
1931
1932 if (page)
1933 /*
1934 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
1935 * necessary to allocate the page. The expectation is
1936 * that the caller is taking steps that will free more
1937 * memory. The caller should avoid the page being used
1938 * for !PFMEMALLOC purposes.
1939 */
1940 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
1941
1931 return page; 1942 return page;
1932} 1943}
1933 1944
@@ -2091,7 +2102,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2091 struct zonelist *zonelist, enum zone_type high_zoneidx, 2102 struct zonelist *zonelist, enum zone_type high_zoneidx,
2092 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 2103 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2093 int migratetype, bool sync_migration, 2104 int migratetype, bool sync_migration,
2094 bool *deferred_compaction, 2105 bool *contended_compaction, bool *deferred_compaction,
2095 unsigned long *did_some_progress) 2106 unsigned long *did_some_progress)
2096{ 2107{
2097 struct page *page; 2108 struct page *page;
@@ -2106,7 +2117,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2106 2117
2107 current->flags |= PF_MEMALLOC; 2118 current->flags |= PF_MEMALLOC;
2108 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, 2119 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
2109 nodemask, sync_migration); 2120 nodemask, sync_migration,
2121 contended_compaction);
2110 current->flags &= ~PF_MEMALLOC; 2122 current->flags &= ~PF_MEMALLOC;
2111 if (*did_some_progress != COMPACT_SKIPPED) { 2123 if (*did_some_progress != COMPACT_SKIPPED) {
2112 2124
@@ -2152,7 +2164,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2152 struct zonelist *zonelist, enum zone_type high_zoneidx, 2164 struct zonelist *zonelist, enum zone_type high_zoneidx,
2153 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 2165 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2154 int migratetype, bool sync_migration, 2166 int migratetype, bool sync_migration,
2155 bool *deferred_compaction, 2167 bool *contended_compaction, bool *deferred_compaction,
2156 unsigned long *did_some_progress) 2168 unsigned long *did_some_progress)
2157{ 2169{
2158 return NULL; 2170 return NULL;
@@ -2325,6 +2337,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2325 unsigned long did_some_progress; 2337 unsigned long did_some_progress;
2326 bool sync_migration = false; 2338 bool sync_migration = false;
2327 bool deferred_compaction = false; 2339 bool deferred_compaction = false;
2340 bool contended_compaction = false;
2328 2341
2329 /* 2342 /*
2330 * In the slowpath, we sanity check order to avoid ever trying to 2343 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2389,14 +2402,6 @@ rebalance:
2389 zonelist, high_zoneidx, nodemask, 2402 zonelist, high_zoneidx, nodemask,
2390 preferred_zone, migratetype); 2403 preferred_zone, migratetype);
2391 if (page) { 2404 if (page) {
2392 /*
2393 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
2394 * necessary to allocate the page. The expectation is
2395 * that the caller is taking steps that will free more
2396 * memory. The caller should avoid the page being used
2397 * for !PFMEMALLOC purposes.
2398 */
2399 page->pfmemalloc = true;
2400 goto got_pg; 2405 goto got_pg;
2401 } 2406 }
2402 } 2407 }
@@ -2422,6 +2427,7 @@ rebalance:
2422 nodemask, 2427 nodemask,
2423 alloc_flags, preferred_zone, 2428 alloc_flags, preferred_zone,
2424 migratetype, sync_migration, 2429 migratetype, sync_migration,
2430 &contended_compaction,
2425 &deferred_compaction, 2431 &deferred_compaction,
2426 &did_some_progress); 2432 &did_some_progress);
2427 if (page) 2433 if (page)
@@ -2431,10 +2437,11 @@ rebalance:
2431 /* 2437 /*
2432 * If compaction is deferred for high-order allocations, it is because 2438 * If compaction is deferred for high-order allocations, it is because
2433 * sync compaction recently failed. If this is the case and the caller 2439 * sync compaction recently failed. If this is the case and the caller
2434 * has requested the system not be heavily disrupted, fail the 2440 * requested a movable allocation that does not heavily disrupt the
2435 * allocation now instead of entering direct reclaim 2441 * system then fail the allocation instead of entering direct reclaim.
2436 */ 2442 */
2437 if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD)) 2443 if ((deferred_compaction || contended_compaction) &&
2444 (gfp_mask & __GFP_NO_KSWAPD))
2438 goto nopage; 2445 goto nopage;
2439 2446
2440 /* Try direct reclaim and then allocating */ 2447 /* Try direct reclaim and then allocating */
@@ -2505,6 +2512,7 @@ rebalance:
2505 nodemask, 2512 nodemask,
2506 alloc_flags, preferred_zone, 2513 alloc_flags, preferred_zone,
2507 migratetype, sync_migration, 2514 migratetype, sync_migration,
2515 &contended_compaction,
2508 &deferred_compaction, 2516 &deferred_compaction,
2509 &did_some_progress); 2517 &did_some_progress);
2510 if (page) 2518 if (page)
@@ -2569,8 +2577,6 @@ retry_cpuset:
2569 page = __alloc_pages_slowpath(gfp_mask, order, 2577 page = __alloc_pages_slowpath(gfp_mask, order,
2570 zonelist, high_zoneidx, nodemask, 2578 zonelist, high_zoneidx, nodemask,
2571 preferred_zone, migratetype); 2579 preferred_zone, migratetype);
2572 else
2573 page->pfmemalloc = false;
2574 2580
2575 trace_mm_page_alloc(page, order, gfp_mask, migratetype); 2581 trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2576 2582
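The allocator side threads a new bool *contended_compaction from __alloc_pages_slowpath() down through __alloc_pages_direct_compact() and try_to_compact_pages(), then treats contention like deferred compaction when __GFP_NO_KSWAPD is set. A sketch of that plumbing pattern under invented names:

#include <stdbool.h>
#include <stdio.h>

/* lowest level: reports why it gave up through an out-parameter */
static bool compact_zones(bool sync, bool *contended)
{
	bool lock_was_contended = true;	/* pretend the zone lock was busy */

	if (!sync && lock_was_contended) {
		if (contended)
			*contended = true;
		return false;
	}
	return true;
}

/* mid layer: just forwards the pointer */
static bool try_compaction(bool sync, bool *contended)
{
	return compact_zones(sync, contended);
}

/* slowpath: combines "deferred" and "contended" to decide whether to bail */
static bool alloc_slowpath(bool no_kswapd)
{
	bool deferred = false, contended = false;

	if (try_compaction(false, &contended))
		return true;

	if ((deferred || contended) && no_kswapd)
		return false;	/* fail rather than enter direct reclaim */

	return true;		/* ...would otherwise fall through to direct reclaim */
}

int main(void)
{
	printf("%d\n", alloc_slowpath(true));
	return 0;
}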
diff --git a/mm/slab.c b/mm/slab.c
index f8b0d539b482..811af03a14ef 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3260,6 +3260,7 @@ force_grow:
3260 3260
3261 /* cache_grow can reenable interrupts, then ac could change. */ 3261 /* cache_grow can reenable interrupts, then ac could change. */
3262 ac = cpu_cache_get(cachep); 3262 ac = cpu_cache_get(cachep);
3263 node = numa_mem_id();
3263 3264
3264 /* no objects in sight? abort */ 3265 /* no objects in sight? abort */
3265 if (!x && (ac->avail == 0 || force_refill)) 3266 if (!x && (ac->avail == 0 || force_refill))
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 73a2a83ee2da..402442402af7 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -137,9 +137,21 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
137 return rc; 137 return rc;
138} 138}
139 139
140static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
141{
142#ifdef CONFIG_NET_POLL_CONTROLLER
143 if (vlan->netpoll)
144 netpoll_send_skb(vlan->netpoll, skb);
145#else
146 BUG();
147#endif
148 return NETDEV_TX_OK;
149}
150
140static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, 151static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
141 struct net_device *dev) 152 struct net_device *dev)
142{ 153{
154 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
143 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); 155 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
144 unsigned int len; 156 unsigned int len;
145 int ret; 157 int ret;
@@ -150,29 +162,30 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
150 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... 162 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
151 */ 163 */
152 if (veth->h_vlan_proto != htons(ETH_P_8021Q) || 164 if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
153 vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) { 165 vlan->flags & VLAN_FLAG_REORDER_HDR) {
154 u16 vlan_tci; 166 u16 vlan_tci;
155 vlan_tci = vlan_dev_priv(dev)->vlan_id; 167 vlan_tci = vlan->vlan_id;
156 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); 168 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
157 skb = __vlan_hwaccel_put_tag(skb, vlan_tci); 169 skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
158 } 170 }
159 171
160 skb->dev = vlan_dev_priv(dev)->real_dev; 172 skb->dev = vlan->real_dev;
161 len = skb->len; 173 len = skb->len;
162 if (netpoll_tx_running(dev)) 174 if (unlikely(netpoll_tx_running(dev)))
163 return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); 175 return vlan_netpoll_send_skb(vlan, skb);
176
164 ret = dev_queue_xmit(skb); 177 ret = dev_queue_xmit(skb);
165 178
166 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 179 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
167 struct vlan_pcpu_stats *stats; 180 struct vlan_pcpu_stats *stats;
168 181
169 stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats); 182 stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
170 u64_stats_update_begin(&stats->syncp); 183 u64_stats_update_begin(&stats->syncp);
171 stats->tx_packets++; 184 stats->tx_packets++;
172 stats->tx_bytes += len; 185 stats->tx_bytes += len;
173 u64_stats_update_end(&stats->syncp); 186 u64_stats_update_end(&stats->syncp);
174 } else { 187 } else {
175 this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped); 188 this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
176 } 189 }
177 190
178 return ret; 191 return ret;
@@ -669,25 +682,26 @@ static void vlan_dev_poll_controller(struct net_device *dev)
669 return; 682 return;
670} 683}
671 684
672static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo) 685static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo,
686 gfp_t gfp)
673{ 687{
674 struct vlan_dev_priv *info = vlan_dev_priv(dev); 688 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
675 struct net_device *real_dev = info->real_dev; 689 struct net_device *real_dev = vlan->real_dev;
676 struct netpoll *netpoll; 690 struct netpoll *netpoll;
677 int err = 0; 691 int err = 0;
678 692
679 netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); 693 netpoll = kzalloc(sizeof(*netpoll), gfp);
680 err = -ENOMEM; 694 err = -ENOMEM;
681 if (!netpoll) 695 if (!netpoll)
682 goto out; 696 goto out;
683 697
684 err = __netpoll_setup(netpoll, real_dev); 698 err = __netpoll_setup(netpoll, real_dev, gfp);
685 if (err) { 699 if (err) {
686 kfree(netpoll); 700 kfree(netpoll);
687 goto out; 701 goto out;
688 } 702 }
689 703
690 info->netpoll = netpoll; 704 vlan->netpoll = netpoll;
691 705
692out: 706out:
693 return err; 707 return err;
@@ -695,19 +709,15 @@ out:
695 709
696static void vlan_dev_netpoll_cleanup(struct net_device *dev) 710static void vlan_dev_netpoll_cleanup(struct net_device *dev)
697{ 711{
698 struct vlan_dev_priv *info = vlan_dev_priv(dev); 712 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
699 struct netpoll *netpoll = info->netpoll; 713 struct netpoll *netpoll = vlan->netpoll;
700 714
701 if (!netpoll) 715 if (!netpoll)
702 return; 716 return;
703 717
704 info->netpoll = NULL; 718 vlan->netpoll = NULL;
705
706 /* Wait for transmitting packets to finish before freeing. */
707 synchronize_rcu_bh();
708 719
709 __netpoll_cleanup(netpoll); 720 __netpoll_free_rcu(netpoll);
710 kfree(netpoll);
711} 721}
712#endif /* CONFIG_NET_POLL_CONTROLLER */ 722#endif /* CONFIG_NET_POLL_CONTROLLER */
713 723
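In the VLAN transmit path the netpoll special case moves into a small static inline, so the #ifdef CONFIG_NET_POLL_CONTROLLER block no longer sits in the middle of ndo_start_xmit, and vlan_dev_priv(dev) is looked up once into a local. A stripped-down, user-space sketch of keeping conditional compilation behind a helper (feature flag and names are made up):

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_FEATURE 1	/* flip to 0 to exercise the stub branch */

struct priv {
	bool feature_enabled;
};

/* keeps the conditional compilation out of the hot path below */
static inline int feature_send(struct priv *p, const char *msg)
{
#if CONFIG_FEATURE
	if (p->feature_enabled)
		printf("feature path: %s\n", msg);
#else
	(void)p;
	(void)msg;
#endif
	return 0;
}

static int xmit(struct priv *p, const char *msg)
{
	if (p->feature_enabled)		/* unlikely() in the kernel version */
		return feature_send(p, msg);
	printf("normal path: %s\n", msg);
	return 0;
}

int main(void)
{
	struct priv p = { .feature_enabled = false };
	return xmit(&p, "hello");
}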
diff --git a/net/atm/common.c b/net/atm/common.c
index b4b44dbed645..0c0ad930a632 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -812,6 +812,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
812 812
813 if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags)) 813 if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
814 return -ENOTCONN; 814 return -ENOTCONN;
815 memset(&pvc, 0, sizeof(pvc));
815 pvc.sap_family = AF_ATMPVC; 816 pvc.sap_family = AF_ATMPVC;
816 pvc.sap_addr.itf = vcc->dev->number; 817 pvc.sap_addr.itf = vcc->dev->number;
817 pvc.sap_addr.vpi = vcc->vpi; 818 pvc.sap_addr.vpi = vcc->vpi;
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index 3a734919c36c..ae0324021407 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -95,6 +95,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
95 return -ENOTCONN; 95 return -ENOTCONN;
96 *sockaddr_len = sizeof(struct sockaddr_atmpvc); 96 *sockaddr_len = sizeof(struct sockaddr_atmpvc);
97 addr = (struct sockaddr_atmpvc *)sockaddr; 97 addr = (struct sockaddr_atmpvc *)sockaddr;
98 memset(addr, 0, sizeof(*addr));
98 addr->sap_family = AF_ATMPVC; 99 addr->sap_family = AF_ATMPVC;
99 addr->sap_addr.itf = vcc->dev->number; 100 addr->sap_addr.itf = vcc->dev->number;
100 addr->sap_addr.vpi = vcc->vpi; 101 addr->sap_addr.vpi = vcc->vpi;
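The two ATM fixes, like the Bluetooth getname/getsockopt changes further down, all apply the same rule: zero the whole structure before filling individual fields, so padding and unused members never carry stale kernel stack bytes to user space. A user-space illustration with a hypothetical address struct:

#include <stdio.h>
#include <string.h>

struct addr_out {
	short family;		/* 2 bytes, usually followed by padding */
	long  value;
	char  reserved[8];	/* never filled by the fast path */
};

static void fill_addr(struct addr_out *out, long value)
{
	memset(out, 0, sizeof(*out));	/* without this, padding and reserved[]
					 * keep whatever was on the stack */
	out->family = 1;
	out->value  = value;
}

int main(void)
{
	struct addr_out a;
	fill_addr(&a, 42);
	printf("%d %ld %d\n", a.family, a.value, a.reserved[0]);
	return 0;
}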
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 41ff978a33f9..715d7e33fba0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1365,6 +1365,9 @@ static bool hci_resolve_next_name(struct hci_dev *hdev)
1365 return false; 1365 return false;
1366 1366
1367 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 1367 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1368 if (!e)
1369 return false;
1370
1368 if (hci_resolve_name(hdev, e) == 0) { 1371 if (hci_resolve_name(hdev, e) == 0) {
1369 e->name_state = NAME_PENDING; 1372 e->name_state = NAME_PENDING;
1370 return true; 1373 return true;
@@ -1393,12 +1396,20 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1393 return; 1396 return;
1394 1397
1395 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); 1398 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1396 if (e) { 1399 /* If the device was not found in the list of found devices whose names
1400 * are pending, there is no need to continue resolving the next name, as it
1401 * will be done upon receiving another Remote Name Request Complete
1402 * Event */
1403 if (!e)
1404 return;
1405
1406 list_del(&e->list);
1407 if (name) {
1397 e->name_state = NAME_KNOWN; 1408 e->name_state = NAME_KNOWN;
1398 list_del(&e->list); 1409 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1399 if (name) 1410 e->data.rssi, name, name_len);
1400 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, 1411 } else {
1401 e->data.rssi, name, name_len); 1412 e->name_state = NAME_NOT_KNOWN;
1402 } 1413 }
1403 1414
1404 if (hci_resolve_next_name(hdev)) 1415 if (hci_resolve_next_name(hdev))
@@ -1762,7 +1773,12 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1762 if (conn->type == ACL_LINK) { 1773 if (conn->type == ACL_LINK) {
1763 conn->state = BT_CONFIG; 1774 conn->state = BT_CONFIG;
1764 hci_conn_hold(conn); 1775 hci_conn_hold(conn);
1765 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1776
1777 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1778 !hci_find_link_key(hdev, &ev->bdaddr))
1779 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1780 else
1781 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1766 } else 1782 } else
1767 conn->state = BT_CONNECTED; 1783 conn->state = BT_CONNECTED;
1768 1784
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index a7f04de03d79..19fdac78e555 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -694,6 +694,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
694 *addr_len = sizeof(*haddr); 694 *addr_len = sizeof(*haddr);
695 haddr->hci_family = AF_BLUETOOTH; 695 haddr->hci_family = AF_BLUETOOTH;
696 haddr->hci_dev = hdev->id; 696 haddr->hci_dev = hdev->id;
697 haddr->hci_channel = 0;
697 698
698 release_sock(sk); 699 release_sock(sk);
699 return 0; 700 return 0;
@@ -1009,6 +1010,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1009 { 1010 {
1010 struct hci_filter *f = &hci_pi(sk)->filter; 1011 struct hci_filter *f = &hci_pi(sk)->filter;
1011 1012
1013 memset(&uf, 0, sizeof(uf));
1012 uf.type_mask = f->type_mask; 1014 uf.type_mask = f->type_mask;
1013 uf.opcode = f->opcode; 1015 uf.opcode = f->opcode;
1014 uf.event_mask[0] = *((u32 *) f->event_mask + 0); 1016 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a8964db04bfb..daa149b7003c 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1181,6 +1181,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1181 sk = chan->sk; 1181 sk = chan->sk;
1182 1182
1183 hci_conn_hold(conn->hcon); 1183 hci_conn_hold(conn->hcon);
1184 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1184 1185
1185 bacpy(&bt_sk(sk)->src, conn->src); 1186 bacpy(&bt_sk(sk)->src, conn->src);
1186 bacpy(&bt_sk(sk)->dst, conn->dst); 1187 bacpy(&bt_sk(sk)->dst, conn->dst);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index a4bb27e8427e..1497edd191a2 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -245,6 +245,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
245 245
246 BT_DBG("sock %p, sk %p", sock, sk); 246 BT_DBG("sock %p, sk %p", sock, sk);
247 247
248 memset(la, 0, sizeof(struct sockaddr_l2));
248 addr->sa_family = AF_BLUETOOTH; 249 addr->sa_family = AF_BLUETOOTH;
249 *len = sizeof(struct sockaddr_l2); 250 *len = sizeof(struct sockaddr_l2);
250 251
@@ -1174,7 +1175,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
1174 1175
1175 chan = l2cap_chan_create(); 1176 chan = l2cap_chan_create();
1176 if (!chan) { 1177 if (!chan) {
1177 l2cap_sock_kill(sk); 1178 sk_free(sk);
1178 return NULL; 1179 return NULL;
1179 } 1180 }
1180 1181
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 7e1e59645c05..1a17850d093c 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -528,6 +528,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
528 528
529 BT_DBG("sock %p, sk %p", sock, sk); 529 BT_DBG("sock %p, sk %p", sock, sk);
530 530
531 memset(sa, 0, sizeof(*sa));
531 sa->rc_family = AF_BLUETOOTH; 532 sa->rc_family = AF_BLUETOOTH;
532 sa->rc_channel = rfcomm_pi(sk)->channel; 533 sa->rc_channel = rfcomm_pi(sk)->channel;
533 if (peer) 534 if (peer)
@@ -822,6 +823,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
822 } 823 }
823 824
824 sec.level = rfcomm_pi(sk)->sec_level; 825 sec.level = rfcomm_pi(sk)->sec_level;
826 sec.key_size = 0;
825 827
826 len = min_t(unsigned int, len, sizeof(sec)); 828 len = min_t(unsigned int, len, sizeof(sec));
827 if (copy_to_user(optval, (char *) &sec, len)) 829 if (copy_to_user(optval, (char *) &sec, len))
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index cb960773c002..56f182393c4c 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -456,7 +456,7 @@ static int rfcomm_get_dev_list(void __user *arg)
456 456
457 size = sizeof(*dl) + dev_num * sizeof(*di); 457 size = sizeof(*dl) + dev_num * sizeof(*di);
458 458
459 dl = kmalloc(size, GFP_KERNEL); 459 dl = kzalloc(size, GFP_KERNEL);
460 if (!dl) 460 if (!dl)
461 return -ENOMEM; 461 return -ENOMEM;
462 462
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 40bbe25dcff7..3589e21edb09 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -131,6 +131,15 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
131 sco_sock_clear_timer(sk); 131 sco_sock_clear_timer(sk);
132 sco_chan_del(sk, err); 132 sco_chan_del(sk, err);
133 bh_unlock_sock(sk); 133 bh_unlock_sock(sk);
134
135 sco_conn_lock(conn);
136 conn->sk = NULL;
137 sco_pi(sk)->conn = NULL;
138 sco_conn_unlock(conn);
139
140 if (conn->hcon)
141 hci_conn_put(conn->hcon);
142
134 sco_sock_kill(sk); 143 sco_sock_kill(sk);
135 } 144 }
136 145
@@ -821,16 +830,6 @@ static void sco_chan_del(struct sock *sk, int err)
821 830
822 BT_DBG("sk %p, conn %p, err %d", sk, conn, err); 831 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
823 832
824 if (conn) {
825 sco_conn_lock(conn);
826 conn->sk = NULL;
827 sco_pi(sk)->conn = NULL;
828 sco_conn_unlock(conn);
829
830 if (conn->hcon)
831 hci_conn_put(conn->hcon);
832 }
833
834 sk->sk_state = BT_CLOSED; 833 sk->sk_state = BT_CLOSED;
835 sk->sk_err = err; 834 sk->sk_err = err;
836 sk->sk_state_change(sk); 835 sk->sk_state_change(sk);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 16ef0dc85a0a..901a616c8083 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -579,8 +579,11 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
579 579
580 if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) 580 if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
581 smp = smp_chan_create(conn); 581 smp = smp_chan_create(conn);
582 else
583 smp = conn->smp_chan;
582 584
583 smp = conn->smp_chan; 585 if (!smp)
586 return SMP_UNSPECIFIED;
584 587
585 smp->preq[0] = SMP_CMD_PAIRING_REQ; 588 smp->preq[0] = SMP_CMD_PAIRING_REQ;
586 memcpy(&smp->preq[1], req, sizeof(*req)); 589 memcpy(&smp->preq[1], req, sizeof(*req));
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 333484537600..070e8a68cfc6 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -31,9 +31,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
31 struct net_bridge_mdb_entry *mdst; 31 struct net_bridge_mdb_entry *mdst;
32 struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); 32 struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
33 33
34 rcu_read_lock();
34#ifdef CONFIG_BRIDGE_NETFILTER 35#ifdef CONFIG_BRIDGE_NETFILTER
35 if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) { 36 if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
36 br_nf_pre_routing_finish_bridge_slow(skb); 37 br_nf_pre_routing_finish_bridge_slow(skb);
38 rcu_read_unlock();
37 return NETDEV_TX_OK; 39 return NETDEV_TX_OK;
38 } 40 }
39#endif 41#endif
@@ -48,7 +50,6 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
48 skb_reset_mac_header(skb); 50 skb_reset_mac_header(skb);
49 skb_pull(skb, ETH_HLEN); 51 skb_pull(skb, ETH_HLEN);
50 52
51 rcu_read_lock();
52 if (is_broadcast_ether_addr(dest)) 53 if (is_broadcast_ether_addr(dest))
53 br_flood_deliver(br, skb); 54 br_flood_deliver(br, skb);
54 else if (is_multicast_ether_addr(dest)) { 55 else if (is_multicast_ether_addr(dest)) {
@@ -206,24 +207,23 @@ static void br_poll_controller(struct net_device *br_dev)
206static void br_netpoll_cleanup(struct net_device *dev) 207static void br_netpoll_cleanup(struct net_device *dev)
207{ 208{
208 struct net_bridge *br = netdev_priv(dev); 209 struct net_bridge *br = netdev_priv(dev);
209 struct net_bridge_port *p, *n; 210 struct net_bridge_port *p;
210 211
211 list_for_each_entry_safe(p, n, &br->port_list, list) { 212 list_for_each_entry(p, &br->port_list, list)
212 br_netpoll_disable(p); 213 br_netpoll_disable(p);
213 }
214} 214}
215 215
216static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) 216static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
217 gfp_t gfp)
217{ 218{
218 struct net_bridge *br = netdev_priv(dev); 219 struct net_bridge *br = netdev_priv(dev);
219 struct net_bridge_port *p, *n; 220 struct net_bridge_port *p;
220 int err = 0; 221 int err = 0;
221 222
222 list_for_each_entry_safe(p, n, &br->port_list, list) { 223 list_for_each_entry(p, &br->port_list, list) {
223 if (!p->dev) 224 if (!p->dev)
224 continue; 225 continue;
225 226 err = br_netpoll_enable(p, gfp);
226 err = br_netpoll_enable(p);
227 if (err) 227 if (err)
228 goto fail; 228 goto fail;
229 } 229 }
@@ -236,17 +236,17 @@ fail:
236 goto out; 236 goto out;
237} 237}
238 238
239int br_netpoll_enable(struct net_bridge_port *p) 239int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
240{ 240{
241 struct netpoll *np; 241 struct netpoll *np;
242 int err = 0; 242 int err = 0;
243 243
244 np = kzalloc(sizeof(*p->np), GFP_KERNEL); 244 np = kzalloc(sizeof(*p->np), gfp);
245 err = -ENOMEM; 245 err = -ENOMEM;
246 if (!np) 246 if (!np)
247 goto out; 247 goto out;
248 248
249 err = __netpoll_setup(np, p->dev); 249 err = __netpoll_setup(np, p->dev, gfp);
250 if (err) { 250 if (err) {
251 kfree(np); 251 kfree(np);
252 goto out; 252 goto out;
@@ -267,11 +267,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
267 267
268 p->np = NULL; 268 p->np = NULL;
269 269
270 /* Wait for transmitting packets to finish before freeing. */ 270 __netpoll_free_rcu(np);
271 synchronize_rcu_bh();
272
273 __netpoll_cleanup(np);
274 kfree(np);
275} 271}
276 272
277#endif 273#endif
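br_dev_xmit() now takes rcu_read_lock() at the very top instead of after the netfilter special case, which means the early NETDEV_TX_OK return has to drop it explicitly. The general shape, every exit path balancing the read-side lock taken on entry, sketched with a pthread rwlock standing in for RCU:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static int special_case;

static int xmit(const char *pkt)
{
	pthread_rwlock_rdlock(&table_lock);	/* covers the whole function */

	if (special_case) {
		puts("handled early");
		pthread_rwlock_unlock(&table_lock);	/* early exit must unlock */
		return 0;
	}

	printf("forwarding %s\n", pkt);
	pthread_rwlock_unlock(&table_lock);
	return 0;
}

int main(void)
{
	xmit("frame");
	special_case = 1;
	return xmit("frame");
}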
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index e9466d412707..02015a505d2a 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -65,7 +65,7 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
65{ 65{
66 skb->dev = to->dev; 66 skb->dev = to->dev;
67 67
68 if (unlikely(netpoll_tx_running(to->dev))) { 68 if (unlikely(netpoll_tx_running(to->br->dev))) {
69 if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) 69 if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
70 kfree_skb(skb); 70 kfree_skb(skb);
71 else { 71 else {
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index e1144e1617be..1c8fdc3558cd 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -361,7 +361,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
361 if (err) 361 if (err)
362 goto err2; 362 goto err2;
363 363
364 if (br_netpoll_info(br) && ((err = br_netpoll_enable(p)))) 364 if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL))))
365 goto err3; 365 goto err3;
366 366
367 err = netdev_set_master(dev, br->dev); 367 err = netdev_set_master(dev, br->dev);
@@ -427,6 +427,10 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
427 if (!p || p->br != br) 427 if (!p || p->br != br)
428 return -EINVAL; 428 return -EINVAL;
429 429
430 /* Since more than one interface can be attached to a bridge,
431 * there may still be an alternate path for netconsole to use;
432 * therefore there is no reason for a NETDEV_RELEASE event.
433 */
430 del_nbp(p); 434 del_nbp(p);
431 435
432 spin_lock_bh(&br->lock); 436 spin_lock_bh(&br->lock);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index a768b2408edf..f507d2af9646 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -316,7 +316,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
316 netpoll_send_skb(np, skb); 316 netpoll_send_skb(np, skb);
317} 317}
318 318
319extern int br_netpoll_enable(struct net_bridge_port *p); 319extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
320extern void br_netpoll_disable(struct net_bridge_port *p); 320extern void br_netpoll_disable(struct net_bridge_port *p);
321#else 321#else
322static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br) 322static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
@@ -329,7 +329,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
329{ 329{
330} 330}
331 331
332static inline int br_netpoll_enable(struct net_bridge_port *p) 332static inline int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
333{ 333{
334 return 0; 334 return 0;
335} 335}
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 69771c04ba8f..e597733affb8 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -94,6 +94,10 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
94 94
95 /* check the version of IP */ 95 /* check the version of IP */
96 ip_version = skb_header_pointer(skb, 0, 1, &buf); 96 ip_version = skb_header_pointer(skb, 0, 1, &buf);
97 if (!ip_version) {
98 kfree_skb(skb);
99 return -EINVAL;
100 }
97 101
98 switch (*ip_version >> 4) { 102 switch (*ip_version >> 4) {
99 case 4: 103 case 4:
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 69e38db28e5f..a8020293f342 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -84,7 +84,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
84 return -1; 84 return -1;
85 } 85 }
86 } else { 86 } else {
87 pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
88 memcpy(&client->fsid, fsid, sizeof(*fsid)); 87 memcpy(&client->fsid, fsid, sizeof(*fsid));
89 } 88 }
90 return 0; 89 return 0;
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 54b531a01121..38b5dc1823d4 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -189,6 +189,9 @@ int ceph_debugfs_client_init(struct ceph_client *client)
189 snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid, 189 snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
190 client->monc.auth->global_id); 190 client->monc.auth->global_id);
191 191
192 dout("ceph_debugfs_client_init %p %s\n", client, name);
193
194 BUG_ON(client->debugfs_dir);
192 client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir); 195 client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
193 if (!client->debugfs_dir) 196 if (!client->debugfs_dir)
194 goto out; 197 goto out;
@@ -234,6 +237,7 @@ out:
234 237
235void ceph_debugfs_client_cleanup(struct ceph_client *client) 238void ceph_debugfs_client_cleanup(struct ceph_client *client)
236{ 239{
240 dout("ceph_debugfs_client_cleanup %p\n", client);
237 debugfs_remove(client->debugfs_osdmap); 241 debugfs_remove(client->debugfs_osdmap);
238 debugfs_remove(client->debugfs_monmap); 242 debugfs_remove(client->debugfs_monmap);
239 debugfs_remove(client->osdc.debugfs_file); 243 debugfs_remove(client->osdc.debugfs_file);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b9796750034a..24c5eea8c45b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -915,7 +915,6 @@ static int prepare_write_connect(struct ceph_connection *con)
915 con->out_connect.authorizer_len = auth ? 915 con->out_connect.authorizer_len = auth ?
916 cpu_to_le32(auth->authorizer_buf_len) : 0; 916 cpu_to_le32(auth->authorizer_buf_len) : 0;
917 917
918 con_out_kvec_reset(con);
919 con_out_kvec_add(con, sizeof (con->out_connect), 918 con_out_kvec_add(con, sizeof (con->out_connect),
920 &con->out_connect); 919 &con->out_connect);
921 if (auth && auth->authorizer_buf_len) 920 if (auth && auth->authorizer_buf_len)
@@ -1557,6 +1556,7 @@ static int process_connect(struct ceph_connection *con)
1557 return -1; 1556 return -1;
1558 } 1557 }
1559 con->auth_retry = 1; 1558 con->auth_retry = 1;
1559 con_out_kvec_reset(con);
1560 ret = prepare_write_connect(con); 1560 ret = prepare_write_connect(con);
1561 if (ret < 0) 1561 if (ret < 0)
1562 return ret; 1562 return ret;
@@ -1577,6 +1577,7 @@ static int process_connect(struct ceph_connection *con)
1577 ENTITY_NAME(con->peer_name), 1577 ENTITY_NAME(con->peer_name),
1578 ceph_pr_addr(&con->peer_addr.in_addr)); 1578 ceph_pr_addr(&con->peer_addr.in_addr));
1579 reset_connection(con); 1579 reset_connection(con);
1580 con_out_kvec_reset(con);
1580 ret = prepare_write_connect(con); 1581 ret = prepare_write_connect(con);
1581 if (ret < 0) 1582 if (ret < 0)
1582 return ret; 1583 return ret;
@@ -1601,6 +1602,7 @@ static int process_connect(struct ceph_connection *con)
1601 le32_to_cpu(con->out_connect.connect_seq), 1602 le32_to_cpu(con->out_connect.connect_seq),
1602 le32_to_cpu(con->in_reply.connect_seq)); 1603 le32_to_cpu(con->in_reply.connect_seq));
1603 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); 1604 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
1605 con_out_kvec_reset(con);
1604 ret = prepare_write_connect(con); 1606 ret = prepare_write_connect(con);
1605 if (ret < 0) 1607 if (ret < 0)
1606 return ret; 1608 return ret;
@@ -1617,6 +1619,7 @@ static int process_connect(struct ceph_connection *con)
1617 le32_to_cpu(con->in_reply.global_seq)); 1619 le32_to_cpu(con->in_reply.global_seq));
1618 get_global_seq(con->msgr, 1620 get_global_seq(con->msgr,
1619 le32_to_cpu(con->in_reply.global_seq)); 1621 le32_to_cpu(con->in_reply.global_seq));
1622 con_out_kvec_reset(con);
1620 ret = prepare_write_connect(con); 1623 ret = prepare_write_connect(con);
1621 if (ret < 0) 1624 if (ret < 0)
1622 return ret; 1625 return ret;
@@ -2135,7 +2138,11 @@ more:
2135 BUG_ON(con->state != CON_STATE_CONNECTING); 2138 BUG_ON(con->state != CON_STATE_CONNECTING);
2136 con->state = CON_STATE_NEGOTIATING; 2139 con->state = CON_STATE_NEGOTIATING;
2137 2140
2138 /* Banner is good, exchange connection info */ 2141 /*
2142 * Received banner is good, exchange connection info.
2143 * Do not reset out_kvec, as sending our banner raced
2144 * with receiving peer banner after connect completed.
2145 */
2139 ret = prepare_write_connect(con); 2146 ret = prepare_write_connect(con);
2140 if (ret < 0) 2147 if (ret < 0)
2141 goto out; 2148 goto out;
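The messenger change pulls con_out_kvec_reset() out of prepare_write_connect() and into its callers, because the banner-exchange path must append the connect message after the already-queued banner rather than wiping it. A tiny sketch of the refactoring, with an invented output buffer:

#include <stdio.h>
#include <string.h>

struct outbuf {
	char data[64];
	size_t len;
};

static void buf_reset(struct outbuf *b)
{
	b->len = 0;
}

/* helper no longer resets; it only appends its own payload */
static void prepare_connect(struct outbuf *b)
{
	const char payload[] = "CONNECT";
	memcpy(b->data + b->len, payload, sizeof(payload) - 1);
	b->len += sizeof(payload) - 1;
}

int main(void)
{
	struct outbuf b = { .len = 0 };

	/* ordinary retry paths: reset first, then rebuild the message */
	buf_reset(&b);
	prepare_connect(&b);

	/* banner-exchange path: banner bytes are already queued in b,
	 * so the caller deliberately skips the reset before appending */
	prepare_connect(&b);

	printf("%zu bytes queued\n", b.len);
	return 0;
}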
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 105d533b55f3..900ea0f043fc 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -311,6 +311,17 @@ int ceph_monc_open_session(struct ceph_mon_client *monc)
311EXPORT_SYMBOL(ceph_monc_open_session); 311EXPORT_SYMBOL(ceph_monc_open_session);
312 312
313/* 313/*
314 * We require the fsid and global_id in order to initialize our
315 * debugfs dir.
316 */
317static bool have_debugfs_info(struct ceph_mon_client *monc)
318{
319 dout("have_debugfs_info fsid %d globalid %lld\n",
320 (int)monc->client->have_fsid, monc->auth->global_id);
321 return monc->client->have_fsid && monc->auth->global_id > 0;
322}
323
324/*
314 * The monitor responds with a mount ack to indicate mount success. The 325 * The monitor responds with a mount ack to indicate mount success. The
315 * included client ticket allows the client to talk to MDSs and OSDs. 326 * included client ticket allows the client to talk to MDSs and OSDs.
316 */ 327 */
@@ -320,9 +331,12 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
320 struct ceph_client *client = monc->client; 331 struct ceph_client *client = monc->client;
321 struct ceph_monmap *monmap = NULL, *old = monc->monmap; 332 struct ceph_monmap *monmap = NULL, *old = monc->monmap;
322 void *p, *end; 333 void *p, *end;
334 int had_debugfs_info, init_debugfs = 0;
323 335
324 mutex_lock(&monc->mutex); 336 mutex_lock(&monc->mutex);
325 337
338 had_debugfs_info = have_debugfs_info(monc);
339
326 dout("handle_monmap\n"); 340 dout("handle_monmap\n");
327 p = msg->front.iov_base; 341 p = msg->front.iov_base;
328 end = p + msg->front.iov_len; 342 end = p + msg->front.iov_len;
@@ -344,12 +358,22 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
344 358
345 if (!client->have_fsid) { 359 if (!client->have_fsid) {
346 client->have_fsid = true; 360 client->have_fsid = true;
361 if (!had_debugfs_info && have_debugfs_info(monc)) {
362 pr_info("client%lld fsid %pU\n",
363 ceph_client_id(monc->client),
364 &monc->client->fsid);
365 init_debugfs = 1;
366 }
347 mutex_unlock(&monc->mutex); 367 mutex_unlock(&monc->mutex);
348 /* 368
349 * do debugfs initialization without mutex to avoid 369 if (init_debugfs) {
350 * creating a locking dependency 370 /*
351 */ 371 * do debugfs initialization without mutex to avoid
352 ceph_debugfs_client_init(client); 372 * creating a locking dependency
373 */
374 ceph_debugfs_client_init(monc->client);
375 }
376
353 goto out_unlocked; 377 goto out_unlocked;
354 } 378 }
355out: 379out:
@@ -865,8 +889,10 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
865{ 889{
866 int ret; 890 int ret;
867 int was_auth = 0; 891 int was_auth = 0;
892 int had_debugfs_info, init_debugfs = 0;
868 893
869 mutex_lock(&monc->mutex); 894 mutex_lock(&monc->mutex);
895 had_debugfs_info = have_debugfs_info(monc);
870 if (monc->auth->ops) 896 if (monc->auth->ops)
871 was_auth = monc->auth->ops->is_authenticated(monc->auth); 897 was_auth = monc->auth->ops->is_authenticated(monc->auth);
872 monc->pending_auth = 0; 898 monc->pending_auth = 0;
@@ -889,7 +915,22 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
889 __send_subscribe(monc); 915 __send_subscribe(monc);
890 __resend_generic_request(monc); 916 __resend_generic_request(monc);
891 } 917 }
918
919 if (!had_debugfs_info && have_debugfs_info(monc)) {
920 pr_info("client%lld fsid %pU\n",
921 ceph_client_id(monc->client),
922 &monc->client->fsid);
923 init_debugfs = 1;
924 }
892 mutex_unlock(&monc->mutex); 925 mutex_unlock(&monc->mutex);
926
927 if (init_debugfs) {
928 /*
929 * do debugfs initialization without mutex to avoid
930 * creating a locking dependency
931 */
932 ceph_debugfs_client_init(monc->client);
933 }
893} 934}
894 935
895static int __validate_auth(struct ceph_mon_client *monc) 936static int __validate_auth(struct ceph_mon_client *monc)
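Both monitor-client handlers now decide under monc->mutex whether the fsid and global_id just became complete, and only call ceph_debugfs_client_init() after dropping the mutex so no lock dependency is created. A generic sketch of deciding under a lock and acting outside it:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool have_id, have_name, initialized;

static void heavy_init(void)
{
	/* would take other locks or sleep; must not run under state_lock */
	puts("init done");
}

static void handle_event(bool got_id, bool got_name)
{
	bool do_init = false;

	pthread_mutex_lock(&state_lock);
	have_id |= got_id;
	have_name |= got_name;
	if (!initialized && have_id && have_name) {
		initialized = true;
		do_init = true;		/* decide under the lock... */
	}
	pthread_mutex_unlock(&state_lock);

	if (do_init)			/* ...act after dropping it */
		heavy_init();
}

int main(void)
{
	handle_event(true, false);
	handle_event(false, true);
	return 0;
}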
diff --git a/net/core/dev.c b/net/core/dev.c
index a39354ee1432..83988362805e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1642,6 +1642,19 @@ static inline int deliver_skb(struct sk_buff *skb,
1642 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 1642 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1643} 1643}
1644 1644
1645static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1646{
1647 if (ptype->af_packet_priv == NULL)
1648 return false;
1649
1650 if (ptype->id_match)
1651 return ptype->id_match(ptype, skb->sk);
1652 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1653 return true;
1654
1655 return false;
1656}
1657
1645/* 1658/*
1646 * Support routine. Sends outgoing frames to any network 1659 * Support routine. Sends outgoing frames to any network
1647 * taps currently in use. 1660 * taps currently in use.
@@ -1659,8 +1672,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1659 * they originated from - MvS (miquels@drinkel.ow.org) 1672 * they originated from - MvS (miquels@drinkel.ow.org)
1660 */ 1673 */
1661 if ((ptype->dev == dev || !ptype->dev) && 1674 if ((ptype->dev == dev || !ptype->dev) &&
1662 (ptype->af_packet_priv == NULL || 1675 (!skb_loop_sk(ptype, skb))) {
1663 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1664 if (pt_prev) { 1676 if (pt_prev) {
1665 deliver_skb(skb2, pt_prev, skb->dev); 1677 deliver_skb(skb2, pt_prev, skb->dev);
1666 pt_prev = ptype; 1678 pt_prev = ptype;
@@ -5732,6 +5744,7 @@ EXPORT_SYMBOL(netdev_refcnt_read);
5732 5744
5733/** 5745/**
5734 * netdev_wait_allrefs - wait until all references are gone. 5746 * netdev_wait_allrefs - wait until all references are gone.
5747 * @dev: target net_device
5735 * 5748 *
5736 * This is called when unregistering network devices. 5749 * This is called when unregistering network devices.
5737 * 5750 *
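dev_queue_xmit_nit()'s loop-suppression test is factored into skb_loop_sk(), which also lets a packet type supply an id_match() callback instead of relying on the raw af_packet_priv pointer comparison. A sketch of that shape with simplified, hypothetical types:

#include <stdbool.h>
#include <stdio.h>

struct sock { int id; };

struct packet_type {
	void *af_packet_priv;				/* originating socket, if any */
	bool (*id_match)(struct packet_type *, struct sock *);
};

/* true if this tap originated the packet and should not see it again */
static bool loop_sk(struct packet_type *pt, struct sock *sk)
{
	if (pt->af_packet_priv == NULL)
		return false;
	if (pt->id_match)
		return pt->id_match(pt, sk);		/* indirect identity check */
	return (struct sock *)pt->af_packet_priv == sk;	/* legacy pointer compare */
}

static bool match_by_id(struct packet_type *pt, struct sock *sk)
{
	return ((struct sock *)pt->af_packet_priv)->id == sk->id;
}

int main(void)
{
	struct sock a = { .id = 1 }, b = { .id = 1 };
	struct packet_type pt = { .af_packet_priv = &a, .id_match = match_by_id };

	printf("%d\n", loop_sk(&pt, &b));	/* 1: same identity, different pointer */
	return 0;
}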
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b4c90e42b443..e4ba3e70c174 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -26,6 +26,7 @@
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/export.h> 28#include <linux/export.h>
29#include <linux/if_vlan.h>
29#include <net/tcp.h> 30#include <net/tcp.h>
30#include <net/udp.h> 31#include <net/udp.h>
31#include <asm/unaligned.h> 32#include <asm/unaligned.h>
@@ -54,7 +55,7 @@ static atomic_t trapped;
54 MAX_UDP_CHUNK) 55 MAX_UDP_CHUNK)
55 56
56static void zap_completion_queue(void); 57static void zap_completion_queue(void);
57static void arp_reply(struct sk_buff *skb); 58static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
58 59
59static unsigned int carrier_timeout = 4; 60static unsigned int carrier_timeout = 4;
60module_param(carrier_timeout, uint, 0644); 61module_param(carrier_timeout, uint, 0644);
@@ -170,7 +171,8 @@ static void poll_napi(struct net_device *dev)
170 list_for_each_entry(napi, &dev->napi_list, dev_list) { 171 list_for_each_entry(napi, &dev->napi_list, dev_list) {
171 if (napi->poll_owner != smp_processor_id() && 172 if (napi->poll_owner != smp_processor_id() &&
172 spin_trylock(&napi->poll_lock)) { 173 spin_trylock(&napi->poll_lock)) {
173 budget = poll_one_napi(dev->npinfo, napi, budget); 174 budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
175 napi, budget);
174 spin_unlock(&napi->poll_lock); 176 spin_unlock(&napi->poll_lock);
175 177
176 if (!budget) 178 if (!budget)
@@ -185,13 +187,14 @@ static void service_arp_queue(struct netpoll_info *npi)
185 struct sk_buff *skb; 187 struct sk_buff *skb;
186 188
187 while ((skb = skb_dequeue(&npi->arp_tx))) 189 while ((skb = skb_dequeue(&npi->arp_tx)))
188 arp_reply(skb); 190 netpoll_arp_reply(skb, npi);
189 } 191 }
190} 192}
191 193
192static void netpoll_poll_dev(struct net_device *dev) 194static void netpoll_poll_dev(struct net_device *dev)
193{ 195{
194 const struct net_device_ops *ops; 196 const struct net_device_ops *ops;
197 struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
195 198
196 if (!dev || !netif_running(dev)) 199 if (!dev || !netif_running(dev))
197 return; 200 return;
@@ -206,17 +209,18 @@ static void netpoll_poll_dev(struct net_device *dev)
206 poll_napi(dev); 209 poll_napi(dev);
207 210
208 if (dev->flags & IFF_SLAVE) { 211 if (dev->flags & IFF_SLAVE) {
209 if (dev->npinfo) { 212 if (ni) {
210 struct net_device *bond_dev = dev->master; 213 struct net_device *bond_dev = dev->master;
211 struct sk_buff *skb; 214 struct sk_buff *skb;
212 while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) { 215 struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
216 while ((skb = skb_dequeue(&ni->arp_tx))) {
213 skb->dev = bond_dev; 217 skb->dev = bond_dev;
214 skb_queue_tail(&bond_dev->npinfo->arp_tx, skb); 218 skb_queue_tail(&bond_ni->arp_tx, skb);
215 } 219 }
216 } 220 }
217 } 221 }
218 222
219 service_arp_queue(dev->npinfo); 223 service_arp_queue(ni);
220 224
221 zap_completion_queue(); 225 zap_completion_queue();
222} 226}
@@ -302,6 +306,7 @@ static int netpoll_owner_active(struct net_device *dev)
302 return 0; 306 return 0;
303} 307}
304 308
309/* call with IRQ disabled */
305void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, 310void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
306 struct net_device *dev) 311 struct net_device *dev)
307{ 312{
@@ -309,8 +314,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
309 unsigned long tries; 314 unsigned long tries;
310 const struct net_device_ops *ops = dev->netdev_ops; 315 const struct net_device_ops *ops = dev->netdev_ops;
311 /* It is up to the caller to keep npinfo alive. */ 316 /* It is up to the caller to keep npinfo alive. */
312 struct netpoll_info *npinfo = np->dev->npinfo; 317 struct netpoll_info *npinfo;
318
319 WARN_ON_ONCE(!irqs_disabled());
313 320
321 npinfo = rcu_dereference_bh(np->dev->npinfo);
314 if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { 322 if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
315 __kfree_skb(skb); 323 __kfree_skb(skb);
316 return; 324 return;
@@ -319,16 +327,22 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
319 /* don't get messages out of order, and no recursion */ 327 /* don't get messages out of order, and no recursion */
320 if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { 328 if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
321 struct netdev_queue *txq; 329 struct netdev_queue *txq;
322 unsigned long flags;
323 330
324 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 331 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
325 332
326 local_irq_save(flags);
327 /* try until next clock tick */ 333 /* try until next clock tick */
328 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; 334 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
329 tries > 0; --tries) { 335 tries > 0; --tries) {
330 if (__netif_tx_trylock(txq)) { 336 if (__netif_tx_trylock(txq)) {
331 if (!netif_xmit_stopped(txq)) { 337 if (!netif_xmit_stopped(txq)) {
338 if (vlan_tx_tag_present(skb) &&
339 !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
340 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
341 if (unlikely(!skb))
342 break;
343 skb->vlan_tci = 0;
344 }
345
332 status = ops->ndo_start_xmit(skb, dev); 346 status = ops->ndo_start_xmit(skb, dev);
333 if (status == NETDEV_TX_OK) 347 if (status == NETDEV_TX_OK)
334 txq_trans_update(txq); 348 txq_trans_update(txq);
@@ -347,10 +361,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
347 } 361 }
348 362
349 WARN_ONCE(!irqs_disabled(), 363 WARN_ONCE(!irqs_disabled(),
350 "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n", 364 "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
351 dev->name, ops->ndo_start_xmit); 365 dev->name, ops->ndo_start_xmit);
352 366
353 local_irq_restore(flags);
354 } 367 }
355 368
356 if (status != NETDEV_TX_OK) { 369 if (status != NETDEV_TX_OK) {
@@ -423,9 +436,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
423} 436}
424EXPORT_SYMBOL(netpoll_send_udp); 437EXPORT_SYMBOL(netpoll_send_udp);
425 438
426static void arp_reply(struct sk_buff *skb) 439static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
427{ 440{
428 struct netpoll_info *npinfo = skb->dev->npinfo;
429 struct arphdr *arp; 441 struct arphdr *arp;
430 unsigned char *arp_ptr; 442 unsigned char *arp_ptr;
431 int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; 443 int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
@@ -543,13 +555,12 @@ static void arp_reply(struct sk_buff *skb)
543 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 555 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
544} 556}
545 557
546int __netpoll_rx(struct sk_buff *skb) 558int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
547{ 559{
548 int proto, len, ulen; 560 int proto, len, ulen;
549 int hits = 0; 561 int hits = 0;
550 const struct iphdr *iph; 562 const struct iphdr *iph;
551 struct udphdr *uh; 563 struct udphdr *uh;
552 struct netpoll_info *npinfo = skb->dev->npinfo;
553 struct netpoll *np, *tmp; 564 struct netpoll *np, *tmp;
554 565
555 if (list_empty(&npinfo->rx_np)) 566 if (list_empty(&npinfo->rx_np))
@@ -565,6 +576,12 @@ int __netpoll_rx(struct sk_buff *skb)
565 return 1; 576 return 1;
566 } 577 }
567 578
579 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
580 skb = vlan_untag(skb);
581 if (unlikely(!skb))
582 goto out;
583 }
584
568 proto = ntohs(eth_hdr(skb)->h_proto); 585 proto = ntohs(eth_hdr(skb)->h_proto);
569 if (proto != ETH_P_IP) 586 if (proto != ETH_P_IP)
570 goto out; 587 goto out;
@@ -715,7 +732,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
715} 732}
716EXPORT_SYMBOL(netpoll_parse_options); 733EXPORT_SYMBOL(netpoll_parse_options);
717 734
718int __netpoll_setup(struct netpoll *np, struct net_device *ndev) 735int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
719{ 736{
720 struct netpoll_info *npinfo; 737 struct netpoll_info *npinfo;
721 const struct net_device_ops *ops; 738 const struct net_device_ops *ops;
@@ -734,7 +751,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
734 } 751 }
735 752
736 if (!ndev->npinfo) { 753 if (!ndev->npinfo) {
737 npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); 754 npinfo = kmalloc(sizeof(*npinfo), gfp);
738 if (!npinfo) { 755 if (!npinfo) {
739 err = -ENOMEM; 756 err = -ENOMEM;
740 goto out; 757 goto out;
@@ -752,7 +769,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
752 769
753 ops = np->dev->netdev_ops; 770 ops = np->dev->netdev_ops;
754 if (ops->ndo_netpoll_setup) { 771 if (ops->ndo_netpoll_setup) {
755 err = ops->ndo_netpoll_setup(ndev, npinfo); 772 err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
756 if (err) 773 if (err)
757 goto free_npinfo; 774 goto free_npinfo;
758 } 775 }
@@ -857,7 +874,7 @@ int netpoll_setup(struct netpoll *np)
857 refill_skbs(); 874 refill_skbs();
858 875
859 rtnl_lock(); 876 rtnl_lock();
860 err = __netpoll_setup(np, ndev); 877 err = __netpoll_setup(np, ndev, GFP_KERNEL);
861 rtnl_unlock(); 878 rtnl_unlock();
862 879
863 if (err) 880 if (err)
@@ -878,6 +895,24 @@ static int __init netpoll_init(void)
878} 895}
879core_initcall(netpoll_init); 896core_initcall(netpoll_init);
880 897
898static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
899{
900 struct netpoll_info *npinfo =
901 container_of(rcu_head, struct netpoll_info, rcu);
902
903 skb_queue_purge(&npinfo->arp_tx);
904 skb_queue_purge(&npinfo->txq);
905
906 /* we can't call cancel_delayed_work_sync here, as we are in softirq */
907 cancel_delayed_work(&npinfo->tx_work);
908
909 /* clean after last, unfinished work */
910 __skb_queue_purge(&npinfo->txq);
911 /* now cancel it again */
912 cancel_delayed_work(&npinfo->tx_work);
913 kfree(npinfo);
914}
915
881void __netpoll_cleanup(struct netpoll *np) 916void __netpoll_cleanup(struct netpoll *np)
882{ 917{
883 struct netpoll_info *npinfo; 918 struct netpoll_info *npinfo;
@@ -903,20 +938,24 @@ void __netpoll_cleanup(struct netpoll *np)
903 ops->ndo_netpoll_cleanup(np->dev); 938 ops->ndo_netpoll_cleanup(np->dev);
904 939
905 RCU_INIT_POINTER(np->dev->npinfo, NULL); 940 RCU_INIT_POINTER(np->dev->npinfo, NULL);
941 call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
942 }
943}
944EXPORT_SYMBOL_GPL(__netpoll_cleanup);
906 945
907 /* avoid racing with NAPI reading npinfo */ 946static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
908 synchronize_rcu_bh(); 947{
948 struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
909 949
910 skb_queue_purge(&npinfo->arp_tx); 950 __netpoll_cleanup(np);
911 skb_queue_purge(&npinfo->txq); 951 kfree(np);
912 cancel_delayed_work_sync(&npinfo->tx_work); 952}
913 953
914 /* clean after last, unfinished work */ 954void __netpoll_free_rcu(struct netpoll *np)
915 __skb_queue_purge(&npinfo->txq); 955{
916 kfree(npinfo); 956 call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
917 }
918} 957}
919EXPORT_SYMBOL_GPL(__netpoll_cleanup); 958EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
920 959
921void netpoll_cleanup(struct netpoll *np) 960void netpoll_cleanup(struct netpoll *np)
922{ 961{
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index ed0c0431fcd8..c75e3f9d060f 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -101,12 +101,10 @@ static int write_update_netdev_table(struct net_device *dev)
101 u32 max_len; 101 u32 max_len;
102 struct netprio_map *map; 102 struct netprio_map *map;
103 103
104 rtnl_lock();
105 max_len = atomic_read(&max_prioidx) + 1; 104 max_len = atomic_read(&max_prioidx) + 1;
106 map = rtnl_dereference(dev->priomap); 105 map = rtnl_dereference(dev->priomap);
107 if (!map || map->priomap_len < max_len) 106 if (!map || map->priomap_len < max_len)
108 ret = extend_netdev_table(dev, max_len); 107 ret = extend_netdev_table(dev, max_len);
109 rtnl_unlock();
110 108
111 return ret; 109 return ret;
112} 110}
@@ -256,17 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
256 if (!dev) 254 if (!dev)
257 goto out_free_devname; 255 goto out_free_devname;
258 256
257 rtnl_lock();
259 ret = write_update_netdev_table(dev); 258 ret = write_update_netdev_table(dev);
260 if (ret < 0) 259 if (ret < 0)
261 goto out_put_dev; 260 goto out_put_dev;
262 261
263 rcu_read_lock(); 262 map = rtnl_dereference(dev->priomap);
264 map = rcu_dereference(dev->priomap);
265 if (map) 263 if (map)
266 map->priomap[prioidx] = priority; 264 map->priomap[prioidx] = priority;
267 rcu_read_unlock();
268 265
269out_put_dev: 266out_put_dev:
267 rtnl_unlock();
270 dev_put(dev); 268 dev_put(dev);
271 269
272out_free_devname: 270out_free_devname:
@@ -277,12 +275,6 @@ out_free_devname:
277void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) 275void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
278{ 276{
279 struct task_struct *p; 277 struct task_struct *p;
280 char *tmp = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL);
281
282 if (!tmp) {
283 pr_warn("Unable to attach cgrp due to alloc failure!\n");
284 return;
285 }
286 278
287 cgroup_taskset_for_each(p, cgrp, tset) { 279 cgroup_taskset_for_each(p, cgrp, tset) {
288 unsigned int fd; 280 unsigned int fd;
@@ -296,32 +288,24 @@ void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
296 continue; 288 continue;
297 } 289 }
298 290
299 rcu_read_lock(); 291 spin_lock(&files->file_lock);
300 fdt = files_fdtable(files); 292 fdt = files_fdtable(files);
301 for (fd = 0; fd < fdt->max_fds; fd++) { 293 for (fd = 0; fd < fdt->max_fds; fd++) {
302 char *path;
303 struct file *file; 294 struct file *file;
304 struct socket *sock; 295 struct socket *sock;
305 unsigned long s; 296 int err;
306 int rv, err = 0;
307 297
308 file = fcheck_files(files, fd); 298 file = fcheck_files(files, fd);
309 if (!file) 299 if (!file)
310 continue; 300 continue;
311 301
312 path = d_path(&file->f_path, tmp, PAGE_SIZE);
313 rv = sscanf(path, "socket:[%lu]", &s);
314 if (rv <= 0)
315 continue;
316
317 sock = sock_from_file(file, &err); 302 sock = sock_from_file(file, &err);
318 if (!err) 303 if (sock)
319 sock_update_netprioidx(sock->sk, p); 304 sock_update_netprioidx(sock->sk, p);
320 } 305 }
321 rcu_read_unlock(); 306 spin_unlock(&files->file_lock);
322 task_unlock(p); 307 task_unlock(p);
323 } 308 }
324 kfree(tmp);
325} 309}
326 310
327static struct cftype ss_files[] = { 311static struct cftype ss_files[] = {
diff --git a/net/core/scm.c b/net/core/scm.c
index 8f6ccfd68ef4..040cebeed45b 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -265,6 +265,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
265 for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax; 265 for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
266 i++, cmfptr++) 266 i++, cmfptr++)
267 { 267 {
268 struct socket *sock;
268 int new_fd; 269 int new_fd;
269 err = security_file_receive(fp[i]); 270 err = security_file_receive(fp[i]);
270 if (err) 271 if (err)
@@ -281,6 +282,9 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
281 } 282 }
282 /* Bump the usage count and install the file. */ 283 /* Bump the usage count and install the file. */
283 get_file(fp[i]); 284 get_file(fp[i]);
285 sock = sock_from_file(fp[i], &err);
286 if (sock)
287 sock_update_netprioidx(sock->sk, current);
284 fd_install(new_fd, fp[i]); 288 fd_install(new_fd, fp[i]);
285 } 289 }
286 290
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 75c3582a7678..fb85d371a8de 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -246,7 +246,7 @@ static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk,
246 u32 __user *optval, int __user *optlen) 246 u32 __user *optval, int __user *optlen)
247{ 247{
248 int rc = -ENOPROTOOPT; 248 int rc = -ENOPROTOOPT;
249 if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL) 249 if (ccid != NULL && ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
250 rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len, 250 rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len,
251 optval, optlen); 251 optval, optlen);
252 return rc; 252 return rc;
@@ -257,7 +257,7 @@ static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk,
257 u32 __user *optval, int __user *optlen) 257 u32 __user *optval, int __user *optlen)
258{ 258{
259 int rc = -ENOPROTOOPT; 259 int rc = -ENOPROTOOPT;
260 if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL) 260 if (ccid != NULL && ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
261 rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len, 261 rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len,
262 optval, optlen); 262 optval, optlen);
263 return rc; 263 return rc;
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index d65e98798eca..119c04317d48 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -535,6 +535,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
535 case DCCP_SOCKOPT_CCID_TX_INFO: 535 case DCCP_SOCKOPT_CCID_TX_INFO:
536 if (len < sizeof(tfrc)) 536 if (len < sizeof(tfrc))
537 return -EINVAL; 537 return -EINVAL;
538 memset(&tfrc, 0, sizeof(tfrc));
538 tfrc.tfrctx_x = hc->tx_x; 539 tfrc.tfrctx_x = hc->tx_x;
539 tfrc.tfrctx_x_recv = hc->tx_x_recv; 540 tfrc.tfrctx_x_recv = hc->tx_x_recv;
540 tfrc.tfrctx_x_calc = hc->tx_x_calc; 541 tfrc.tfrctx_x_calc = hc->tx_x_calc;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index db0cf17c00f7..7f75f21d7b83 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -404,12 +404,15 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
404{ 404{
405 const struct inet_request_sock *ireq = inet_rsk(req); 405 const struct inet_request_sock *ireq = inet_rsk(req);
406 struct inet_sock *newinet = inet_sk(newsk); 406 struct inet_sock *newinet = inet_sk(newsk);
407 struct ip_options_rcu *opt = ireq->opt; 407 struct ip_options_rcu *opt;
408 struct net *net = sock_net(sk); 408 struct net *net = sock_net(sk);
409 struct flowi4 *fl4; 409 struct flowi4 *fl4;
410 struct rtable *rt; 410 struct rtable *rt;
411 411
412 fl4 = &newinet->cork.fl.u.ip4; 412 fl4 = &newinet->cork.fl.u.ip4;
413
414 rcu_read_lock();
415 opt = rcu_dereference(newinet->inet_opt);
413 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 416 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
414 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 417 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
415 sk->sk_protocol, inet_sk_flowi_flags(sk), 418 sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -421,11 +424,13 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
421 goto no_route; 424 goto no_route;
422 if (opt && opt->opt.is_strictroute && rt->rt_gateway) 425 if (opt && opt->opt.is_strictroute && rt->rt_gateway)
423 goto route_err; 426 goto route_err;
427 rcu_read_unlock();
424 return &rt->dst; 428 return &rt->dst;
425 429
426route_err: 430route_err:
427 ip_rt_put(rt); 431 ip_rt_put(rt);
428no_route: 432no_route:
433 rcu_read_unlock();
429 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); 434 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
430 return NULL; 435 return NULL;
431} 436}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 147ccc3e93db..c196d749daf2 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1338,10 +1338,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
1338 iph->ihl = 5; 1338 iph->ihl = 5;
1339 iph->tos = inet->tos; 1339 iph->tos = inet->tos;
1340 iph->frag_off = df; 1340 iph->frag_off = df;
1341 ip_select_ident(iph, &rt->dst, sk);
1342 iph->ttl = ttl; 1341 iph->ttl = ttl;
1343 iph->protocol = sk->sk_protocol; 1342 iph->protocol = sk->sk_protocol;
1344 ip_copy_addrs(iph, fl4); 1343 ip_copy_addrs(iph, fl4);
1344 ip_select_ident(iph, &rt->dst, sk);
1345 1345
1346 if (opt) { 1346 if (opt) {
1347 iph->ihl += opt->optlen>>2; 1347 iph->ihl += opt->optlen>>2;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 8eec8f4a0536..ebdf06f938bf 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
124static struct kmem_cache *mrt_cachep __read_mostly; 124static struct kmem_cache *mrt_cachep __read_mostly;
125 125
126static struct mr_table *ipmr_new_table(struct net *net, u32 id); 126static struct mr_table *ipmr_new_table(struct net *net, u32 id);
127static void ipmr_free_table(struct mr_table *mrt);
128
127static int ip_mr_forward(struct net *net, struct mr_table *mrt, 129static int ip_mr_forward(struct net *net, struct mr_table *mrt,
128 struct sk_buff *skb, struct mfc_cache *cache, 130 struct sk_buff *skb, struct mfc_cache *cache,
129 int local); 131 int local);
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
131 struct sk_buff *pkt, vifi_t vifi, int assert); 133 struct sk_buff *pkt, vifi_t vifi, int assert);
132static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 134static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
133 struct mfc_cache *c, struct rtmsg *rtm); 135 struct mfc_cache *c, struct rtmsg *rtm);
136static void mroute_clean_tables(struct mr_table *mrt);
134static void ipmr_expire_process(unsigned long arg); 137static void ipmr_expire_process(unsigned long arg);
135 138
136#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES 139#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
271 274
272 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { 275 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
273 list_del(&mrt->list); 276 list_del(&mrt->list);
274 kfree(mrt); 277 ipmr_free_table(mrt);
275 } 278 }
276 fib_rules_unregister(net->ipv4.mr_rules_ops); 279 fib_rules_unregister(net->ipv4.mr_rules_ops);
277} 280}
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
299 302
300static void __net_exit ipmr_rules_exit(struct net *net) 303static void __net_exit ipmr_rules_exit(struct net *net)
301{ 304{
302 kfree(net->ipv4.mrt); 305 ipmr_free_table(net->ipv4.mrt);
303} 306}
304#endif 307#endif
305 308
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
336 return mrt; 339 return mrt;
337} 340}
338 341
342static void ipmr_free_table(struct mr_table *mrt)
343{
344 del_timer_sync(&mrt->ipmr_expire_timer);
345 mroute_clean_tables(mrt);
346 kfree(mrt);
347}
348
339/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ 349/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
340 350
341static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) 351static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index ea4a23813d26..9c87cde28ff8 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
148 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, 148 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
149 hdr, NULL, &matchoff, &matchlen, 149 hdr, NULL, &matchoff, &matchlen,
150 &addr, &port) > 0) { 150 &addr, &port) > 0) {
151 unsigned int matchend, poff, plen, buflen, n; 151 unsigned int olen, matchend, poff, plen, buflen, n;
152 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; 152 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
153 153
154 /* We're only interested in headers related to this 154 /* We're only interested in headers related to this
@@ -163,17 +163,18 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
163 goto next; 163 goto next;
164 } 164 }
165 165
166 olen = *datalen;
166 if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, 167 if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
167 &addr, port)) 168 &addr, port))
168 return NF_DROP; 169 return NF_DROP;
169 170
170 matchend = matchoff + matchlen; 171 matchend = matchoff + matchlen + *datalen - olen;
171 172
172 /* The maddr= parameter (RFC 2361) specifies where to send 173 /* The maddr= parameter (RFC 2361) specifies where to send
173 * the reply. */ 174 * the reply. */
174 if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, 175 if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
175 "maddr=", &poff, &plen, 176 "maddr=", &poff, &plen,
176 &addr) > 0 && 177 &addr, true) > 0 &&
177 addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && 178 addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
178 addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { 179 addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
179 buflen = sprintf(buffer, "%pI4", 180 buflen = sprintf(buffer, "%pI4",
@@ -187,7 +188,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
187 * from which the server received the request. */ 188 * from which the server received the request. */
188 if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, 189 if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
189 "received=", &poff, &plen, 190 "received=", &poff, &plen,
190 &addr) > 0 && 191 &addr, false) > 0 &&
191 addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && 192 addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&
192 addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { 193 addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
193 buflen = sprintf(buffer, "%pI4", 194 buflen = sprintf(buffer, "%pI4",
@@ -501,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
501 ret = nf_ct_expect_related(rtcp_exp); 502 ret = nf_ct_expect_related(rtcp_exp);
502 if (ret == 0) 503 if (ret == 0)
503 break; 504 break;
504 else if (ret != -EBUSY) { 505 else if (ret == -EBUSY) {
506 nf_ct_unexpect_related(rtp_exp);
507 continue;
508 } else if (ret < 0) {
505 nf_ct_unexpect_related(rtp_exp); 509 nf_ct_unexpect_related(rtp_exp);
506 port = 0; 510 port = 0;
507 break; 511 break;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e4ba974f143c..82cf2a722b23 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -934,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
934 if (mtu < ip_rt_min_pmtu) 934 if (mtu < ip_rt_min_pmtu)
935 mtu = ip_rt_min_pmtu; 935 mtu = ip_rt_min_pmtu;
936 936
937 rcu_read_lock();
937 if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) { 938 if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
938 struct fib_nh *nh = &FIB_RES_NH(res); 939 struct fib_nh *nh = &FIB_RES_NH(res);
939 940
940 update_or_create_fnhe(nh, fl4->daddr, 0, mtu, 941 update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
941 jiffies + ip_rt_mtu_expires); 942 jiffies + ip_rt_mtu_expires);
942 } 943 }
944 rcu_read_unlock();
943 return mtu; 945 return mtu;
944} 946}
945 947
@@ -956,7 +958,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
956 dst->obsolete = DST_OBSOLETE_KILL; 958 dst->obsolete = DST_OBSOLETE_KILL;
957 } else { 959 } else {
958 rt->rt_pmtu = mtu; 960 rt->rt_pmtu = mtu;
959 dst_set_expires(&rt->dst, ip_rt_mtu_expires); 961 rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
960 } 962 }
961} 963}
962 964
@@ -1263,7 +1265,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
1263{ 1265{
1264 struct rtable *rt = (struct rtable *) dst; 1266 struct rtable *rt = (struct rtable *) dst;
1265 1267
1266 if (dst->flags & DST_NOCACHE) { 1268 if (!list_empty(&rt->rt_uncached)) {
1267 spin_lock_bh(&rt_uncached_lock); 1269 spin_lock_bh(&rt_uncached_lock);
1268 list_del(&rt->rt_uncached); 1270 list_del(&rt->rt_uncached);
1269 spin_unlock_bh(&rt_uncached_lock); 1271 spin_unlock_bh(&rt_uncached_lock);
@@ -2028,7 +2030,6 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2028 } 2030 }
2029 dev_out = net->loopback_dev; 2031 dev_out = net->loopback_dev;
2030 fl4->flowi4_oif = dev_out->ifindex; 2032 fl4->flowi4_oif = dev_out->ifindex;
2031 res.fi = NULL;
2032 flags |= RTCF_LOCAL; 2033 flags |= RTCF_LOCAL;
2033 goto make_route; 2034 goto make_route;
2034 } 2035 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 85308b90df80..6e38c6c23caa 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
2926 * tcp_xmit_retransmit_queue(). 2926 * tcp_xmit_retransmit_queue().
2927 */ 2927 */
2928static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, 2928static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
2929 int newly_acked_sacked, bool is_dupack, 2929 int prior_sacked, bool is_dupack,
2930 int flag) 2930 int flag)
2931{ 2931{
2932 struct inet_connection_sock *icsk = inet_csk(sk); 2932 struct inet_connection_sock *icsk = inet_csk(sk);
2933 struct tcp_sock *tp = tcp_sk(sk); 2933 struct tcp_sock *tp = tcp_sk(sk);
2934 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && 2934 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
2935 (tcp_fackets_out(tp) > tp->reordering)); 2935 (tcp_fackets_out(tp) > tp->reordering));
2936 int newly_acked_sacked = 0;
2936 int fast_rexmit = 0; 2937 int fast_rexmit = 0;
2937 2938
2938 if (WARN_ON(!tp->packets_out && tp->sacked_out)) 2939 if (WARN_ON(!tp->packets_out && tp->sacked_out))
@@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
2992 tcp_add_reno_sack(sk); 2993 tcp_add_reno_sack(sk);
2993 } else 2994 } else
2994 do_lost = tcp_try_undo_partial(sk, pkts_acked); 2995 do_lost = tcp_try_undo_partial(sk, pkts_acked);
2996 newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
2995 break; 2997 break;
2996 case TCP_CA_Loss: 2998 case TCP_CA_Loss:
2997 if (flag & FLAG_DATA_ACKED) 2999 if (flag & FLAG_DATA_ACKED)
@@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3013 if (is_dupack) 3015 if (is_dupack)
3014 tcp_add_reno_sack(sk); 3016 tcp_add_reno_sack(sk);
3015 } 3017 }
3018 newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
3016 3019
3017 if (icsk->icsk_ca_state <= TCP_CA_Disorder) 3020 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
3018 tcp_try_undo_dsack(sk); 3021 tcp_try_undo_dsack(sk);
@@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3590 int prior_packets; 3593 int prior_packets;
3591 int prior_sacked = tp->sacked_out; 3594 int prior_sacked = tp->sacked_out;
3592 int pkts_acked = 0; 3595 int pkts_acked = 0;
3593 int newly_acked_sacked = 0;
3594 bool frto_cwnd = false; 3596 bool frto_cwnd = false;
3595 3597
3596 /* If the ack is older than previous acks 3598 /* If the ack is older than previous acks
@@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3666 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); 3668 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
3667 3669
3668 pkts_acked = prior_packets - tp->packets_out; 3670 pkts_acked = prior_packets - tp->packets_out;
3669 newly_acked_sacked = (prior_packets - prior_sacked) -
3670 (tp->packets_out - tp->sacked_out);
3671 3671
3672 if (tp->frto_counter) 3672 if (tp->frto_counter)
3673 frto_cwnd = tcp_process_frto(sk, flag); 3673 frto_cwnd = tcp_process_frto(sk, flag);
@@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3681 tcp_may_raise_cwnd(sk, flag)) 3681 tcp_may_raise_cwnd(sk, flag))
3682 tcp_cong_avoid(sk, ack, prior_in_flight); 3682 tcp_cong_avoid(sk, ack, prior_in_flight);
3683 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3683 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
3684 tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, 3684 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
3685 is_dupack, flag); 3685 is_dupack, flag);
3686 } else { 3686 } else {
3687 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) 3687 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
@@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3698no_queue: 3698no_queue:
3699 /* If data was DSACKed, see if we can undo a cwnd reduction. */ 3699 /* If data was DSACKed, see if we can undo a cwnd reduction. */
3700 if (flag & FLAG_DSACKING_ACK) 3700 if (flag & FLAG_DSACKING_ACK)
3701 tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, 3701 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
3702 is_dupack, flag); 3702 is_dupack, flag);
3703 /* If this ack opens up a zero window, clear backoff. It was 3703 /* If this ack opens up a zero window, clear backoff. It was
3704 * being used to time the probes, and is probably far higher than 3704 * being used to time the probes, and is probably far higher than
@@ -3718,8 +3718,7 @@ old_ack:
3718 */ 3718 */
3719 if (TCP_SKB_CB(skb)->sacked) { 3719 if (TCP_SKB_CB(skb)->sacked) {
3720 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); 3720 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
3721 newly_acked_sacked = tp->sacked_out - prior_sacked; 3721 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
3722 tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
3723 is_dupack, flag); 3722 is_dupack, flag);
3724 } 3723 }
3725 3724
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 767823764016..00a748d14062 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -417,10 +417,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
417 417
418 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ 418 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
419 tp->mtu_info = info; 419 tp->mtu_info = info;
420 if (!sock_owned_by_user(sk)) 420 if (!sock_owned_by_user(sk)) {
421 tcp_v4_mtu_reduced(sk); 421 tcp_v4_mtu_reduced(sk);
422 else 422 } else {
423 set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags); 423 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
424 sock_hold(sk);
425 }
424 goto out; 426 goto out;
425 } 427 }
426 428
@@ -1462,6 +1464,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1462 goto exit_nonewsk; 1464 goto exit_nonewsk;
1463 1465
1464 newsk->sk_gso_type = SKB_GSO_TCPV4; 1466 newsk->sk_gso_type = SKB_GSO_TCPV4;
1467 inet_sk_rx_dst_set(newsk, skb);
1465 1468
1466 newtp = tcp_sk(newsk); 1469 newtp = tcp_sk(newsk);
1467 newinet = inet_sk(newsk); 1470 newinet = inet_sk(newsk);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index d9c9dcef2de3..6ff7f10dce9d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -387,8 +387,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
387 struct tcp_sock *oldtp = tcp_sk(sk); 387 struct tcp_sock *oldtp = tcp_sk(sk);
388 struct tcp_cookie_values *oldcvp = oldtp->cookie_values; 388 struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
389 389
390 newicsk->icsk_af_ops->sk_rx_dst_set(newsk, skb);
391
392 /* TCP Cookie Transactions require space for the cookie pair, 390 /* TCP Cookie Transactions require space for the cookie pair,
393 * as it differs for each connection. There is no need to 391 * as it differs for each connection. There is no need to
394 * copy any s_data_payload stored at the original socket. 392 * copy any s_data_payload stored at the original socket.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 20dfd892c86f..d04632673a9e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -910,14 +910,18 @@ void tcp_release_cb(struct sock *sk)
910 if (flags & (1UL << TCP_TSQ_DEFERRED)) 910 if (flags & (1UL << TCP_TSQ_DEFERRED))
911 tcp_tsq_handler(sk); 911 tcp_tsq_handler(sk);
912 912
913 if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) 913 if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
914 tcp_write_timer_handler(sk); 914 tcp_write_timer_handler(sk);
915 915 __sock_put(sk);
916 if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) 916 }
917 if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
917 tcp_delack_timer_handler(sk); 918 tcp_delack_timer_handler(sk);
918 919 __sock_put(sk);
919 if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) 920 }
921 if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
920 sk->sk_prot->mtu_reduced(sk); 922 sk->sk_prot->mtu_reduced(sk);
923 __sock_put(sk);
924 }
921} 925}
922EXPORT_SYMBOL(tcp_release_cb); 926EXPORT_SYMBOL(tcp_release_cb);
923 927
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 6df36ad55a38..b774a03bd1dc 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -252,7 +252,8 @@ static void tcp_delack_timer(unsigned long data)
252 inet_csk(sk)->icsk_ack.blocked = 1; 252 inet_csk(sk)->icsk_ack.blocked = 1;
253 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); 253 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
254 /* deleguate our work to tcp_release_cb() */ 254 /* deleguate our work to tcp_release_cb() */
255 set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags); 255 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
256 sock_hold(sk);
256 } 257 }
257 bh_unlock_sock(sk); 258 bh_unlock_sock(sk);
258 sock_put(sk); 259 sock_put(sk);
@@ -481,7 +482,8 @@ static void tcp_write_timer(unsigned long data)
481 tcp_write_timer_handler(sk); 482 tcp_write_timer_handler(sk);
482 } else { 483 } else {
483 /* deleguate our work to tcp_release_cb() */ 484 /* deleguate our work to tcp_release_cb() */
484 set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags); 485 if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
486 sock_hold(sk);
485 } 487 }
486 bh_unlock_sock(sk); 488 bh_unlock_sock(sk);
487 sock_put(sk); 489 sock_put(sk);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 79181819a24f..6bc85f7c31e3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -494,8 +494,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
494 struct net_device *dev; 494 struct net_device *dev;
495 struct inet6_dev *idev; 495 struct inet6_dev *idev;
496 496
497 rcu_read_lock(); 497 for_each_netdev(net, dev) {
498 for_each_netdev_rcu(net, dev) {
499 idev = __in6_dev_get(dev); 498 idev = __in6_dev_get(dev);
500 if (idev) { 499 if (idev) {
501 int changed = (!idev->cnf.forwarding) ^ (!newf); 500 int changed = (!idev->cnf.forwarding) ^ (!newf);
@@ -504,7 +503,6 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
504 dev_forward_change(idev); 503 dev_forward_change(idev);
505 } 504 }
506 } 505 }
507 rcu_read_unlock();
508} 506}
509 507
510static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) 508static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 6dc7fd353ef5..282f3723ee19 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
167 struct esp_data *esp = x->data; 167 struct esp_data *esp = x->data;
168 168
169 /* skb is pure payload to encrypt */ 169 /* skb is pure payload to encrypt */
170 err = -ENOMEM;
171
172 aead = esp->aead; 170 aead = esp->aead;
173 alen = crypto_aead_authsize(aead); 171 alen = crypto_aead_authsize(aead);
174 172
@@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
203 } 201 }
204 202
205 tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); 203 tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
206 if (!tmp) 204 if (!tmp) {
205 err = -ENOMEM;
207 goto error; 206 goto error;
207 }
208 208
209 seqhi = esp_tmp_seqhi(tmp); 209 seqhi = esp_tmp_seqhi(tmp);
210 iv = esp_tmp_iv(aead, tmp, seqhilen); 210 iv = esp_tmp_iv(aead, tmp, seqhilen);
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index da2e92d05c15..745a32042950 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -307,10 +307,10 @@ static int __net_init ipv6_proc_init_net(struct net *net)
307 goto proc_dev_snmp6_fail; 307 goto proc_dev_snmp6_fail;
308 return 0; 308 return 0;
309 309
310proc_dev_snmp6_fail:
311 proc_net_remove(net, "snmp6");
310proc_snmp6_fail: 312proc_snmp6_fail:
311 proc_net_remove(net, "sockstat6"); 313 proc_net_remove(net, "sockstat6");
312proc_dev_snmp6_fail:
313 proc_net_remove(net, "dev_snmp6");
314 return -ENOMEM; 314 return -ENOMEM;
315} 315}
316 316
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index bb9ce2b2f377..a3e60cc04a8a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -94,6 +94,18 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
94} 94}
95#endif 95#endif
96 96
97static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
98{
99 struct dst_entry *dst = skb_dst(skb);
100 const struct rt6_info *rt = (const struct rt6_info *)dst;
101
102 dst_hold(dst);
103 sk->sk_rx_dst = dst;
104 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
105 if (rt->rt6i_node)
106 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
107}
108
97static void tcp_v6_hash(struct sock *sk) 109static void tcp_v6_hash(struct sock *sk)
98{ 110{
99 if (sk->sk_state != TCP_CLOSE) { 111 if (sk->sk_state != TCP_CLOSE) {
@@ -1270,6 +1282,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1270 1282
1271 newsk->sk_gso_type = SKB_GSO_TCPV6; 1283 newsk->sk_gso_type = SKB_GSO_TCPV6;
1272 __ip6_dst_store(newsk, dst, NULL, NULL); 1284 __ip6_dst_store(newsk, dst, NULL, NULL);
1285 inet6_sk_rx_dst_set(newsk, skb);
1273 1286
1274 newtcp6sk = (struct tcp6_sock *)newsk; 1287 newtcp6sk = (struct tcp6_sock *)newsk;
1275 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; 1288 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
@@ -1729,18 +1742,6 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1729 .twsk_destructor= tcp_twsk_destructor, 1742 .twsk_destructor= tcp_twsk_destructor,
1730}; 1743};
1731 1744
1732static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1733{
1734 struct dst_entry *dst = skb_dst(skb);
1735 const struct rt6_info *rt = (const struct rt6_info *)dst;
1736
1737 dst_hold(dst);
1738 sk->sk_rx_dst = dst;
1739 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1740 if (rt->rt6i_node)
1741 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
1742}
1743
1744static const struct inet_connection_sock_af_ops ipv6_specific = { 1745static const struct inet_connection_sock_af_ops ipv6_specific = {
1745 .queue_xmit = inet6_csk_xmit, 1746 .queue_xmit = inet6_csk_xmit,
1746 .send_check = tcp_v6_send_check, 1747 .send_check = tcp_v6_send_check,
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ef39812107b1..f8c4c08ffb60 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -73,6 +73,13 @@ static int xfrm6_get_tos(const struct flowi *fl)
73 return 0; 73 return 0;
74} 74}
75 75
76static void xfrm6_init_dst(struct net *net, struct xfrm_dst *xdst)
77{
78 struct rt6_info *rt = (struct rt6_info *)xdst;
79
80 rt6_init_peer(rt, net->ipv6.peers);
81}
82
76static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst, 83static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
77 int nfheader_len) 84 int nfheader_len)
78{ 85{
@@ -286,6 +293,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
286 .get_saddr = xfrm6_get_saddr, 293 .get_saddr = xfrm6_get_saddr,
287 .decode_session = _decode_session6, 294 .decode_session = _decode_session6,
288 .get_tos = xfrm6_get_tos, 295 .get_tos = xfrm6_get_tos,
296 .init_dst = xfrm6_init_dst,
289 .init_path = xfrm6_init_path, 297 .init_path = xfrm6_init_path,
290 .fill_dst = xfrm6_fill_dst, 298 .fill_dst = xfrm6_fill_dst,
291 .blackhole_route = ip6_blackhole_route, 299 .blackhole_route = ip6_blackhole_route,
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 393355d37b47..513cab08a986 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
1347 /* Remove from tunnel list */ 1347 /* Remove from tunnel list */
1348 spin_lock_bh(&pn->l2tp_tunnel_list_lock); 1348 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1349 list_del_rcu(&tunnel->list); 1349 list_del_rcu(&tunnel->list);
1350 kfree_rcu(tunnel, rcu);
1350 spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1351 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1351 synchronize_rcu();
1352 1352
1353 atomic_dec(&l2tp_tunnel_count); 1353 atomic_dec(&l2tp_tunnel_count);
1354 kfree(tunnel);
1355} 1354}
1356 1355
1357/* Create a socket for the tunnel, if one isn't set up by 1356/* Create a socket for the tunnel, if one isn't set up by
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a38ec6cdeee1..56d583e083a7 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg {
163 163
164struct l2tp_tunnel { 164struct l2tp_tunnel {
165 int magic; /* Should be L2TP_TUNNEL_MAGIC */ 165 int magic; /* Should be L2TP_TUNNEL_MAGIC */
166 struct rcu_head rcu;
166 rwlock_t hlist_lock; /* protect session_hlist */ 167 rwlock_t hlist_lock; /* protect session_hlist */
167 struct hlist_head session_hlist[L2TP_HASH_SIZE]; 168 struct hlist_head session_hlist[L2TP_HASH_SIZE];
168 /* hashed list of sessions, 169 /* hashed list of sessions,
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 35e1e4bde587..927547171bc7 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -410,6 +410,7 @@ static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
410 lsa->l2tp_family = AF_INET6; 410 lsa->l2tp_family = AF_INET6;
411 lsa->l2tp_flowinfo = 0; 411 lsa->l2tp_flowinfo = 0;
412 lsa->l2tp_scope_id = 0; 412 lsa->l2tp_scope_id = 0;
413 lsa->l2tp_unused = 0;
413 if (peer) { 414 if (peer) {
414 if (!lsk->peer_conn_id) 415 if (!lsk->peer_conn_id)
415 return -ENOTCONN; 416 return -ENOTCONN;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index f6fe4d400502..c2190005a114 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -969,14 +969,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
969 struct sockaddr_llc sllc; 969 struct sockaddr_llc sllc;
970 struct sock *sk = sock->sk; 970 struct sock *sk = sock->sk;
971 struct llc_sock *llc = llc_sk(sk); 971 struct llc_sock *llc = llc_sk(sk);
972 int rc = 0; 972 int rc = -EBADF;
973 973
974 memset(&sllc, 0, sizeof(sllc)); 974 memset(&sllc, 0, sizeof(sllc));
975 lock_sock(sk); 975 lock_sock(sk);
976 if (sock_flag(sk, SOCK_ZAPPED)) 976 if (sock_flag(sk, SOCK_ZAPPED))
977 goto out; 977 goto out;
978 *uaddrlen = sizeof(sllc); 978 *uaddrlen = sizeof(sllc);
979 memset(uaddr, 0, *uaddrlen);
980 if (peer) { 979 if (peer) {
981 rc = -ENOTCONN; 980 rc = -ENOTCONN;
982 if (sk->sk_state != TCP_ESTABLISHED) 981 if (sk->sk_state != TCP_ESTABLISHED)
@@ -1206,7 +1205,7 @@ static int __init llc2_init(void)
1206 rc = llc_proc_init(); 1205 rc = llc_proc_init();
1207 if (rc != 0) { 1206 if (rc != 0) {
1208 printk(llc_proc_err_msg); 1207 printk(llc_proc_err_msg);
1209 goto out_unregister_llc_proto; 1208 goto out_station;
1210 } 1209 }
1211 rc = llc_sysctl_init(); 1210 rc = llc_sysctl_init();
1212 if (rc) { 1211 if (rc) {
@@ -1226,7 +1225,8 @@ out_sysctl:
1226 llc_sysctl_exit(); 1225 llc_sysctl_exit();
1227out_proc: 1226out_proc:
1228 llc_proc_exit(); 1227 llc_proc_exit();
1229out_unregister_llc_proto: 1228out_station:
1229 llc_station_exit();
1230 proto_unregister(&llc_proto); 1230 proto_unregister(&llc_proto);
1231 goto out; 1231 goto out;
1232} 1232}
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index e32cab44ea95..dd3e83328ad5 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -42,6 +42,7 @@ static void (*llc_type_handlers[2])(struct llc_sap *sap,
42void llc_add_pack(int type, void (*handler)(struct llc_sap *sap, 42void llc_add_pack(int type, void (*handler)(struct llc_sap *sap,
43 struct sk_buff *skb)) 43 struct sk_buff *skb))
44{ 44{
45 smp_wmb(); /* ensure initialisation is complete before it's called */
45 if (type == LLC_DEST_SAP || type == LLC_DEST_CONN) 46 if (type == LLC_DEST_SAP || type == LLC_DEST_CONN)
46 llc_type_handlers[type - 1] = handler; 47 llc_type_handlers[type - 1] = handler;
47} 48}
@@ -50,11 +51,19 @@ void llc_remove_pack(int type)
50{ 51{
51 if (type == LLC_DEST_SAP || type == LLC_DEST_CONN) 52 if (type == LLC_DEST_SAP || type == LLC_DEST_CONN)
52 llc_type_handlers[type - 1] = NULL; 53 llc_type_handlers[type - 1] = NULL;
54 synchronize_net();
53} 55}
54 56
55void llc_set_station_handler(void (*handler)(struct sk_buff *skb)) 57void llc_set_station_handler(void (*handler)(struct sk_buff *skb))
56{ 58{
59 /* Ensure initialisation is complete before it's called */
60 if (handler)
61 smp_wmb();
62
57 llc_station_handler = handler; 63 llc_station_handler = handler;
64
65 if (!handler)
66 synchronize_net();
58} 67}
59 68
60/** 69/**
@@ -150,6 +159,8 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
150 int dest; 159 int dest;
151 int (*rcv)(struct sk_buff *, struct net_device *, 160 int (*rcv)(struct sk_buff *, struct net_device *,
152 struct packet_type *, struct net_device *); 161 struct packet_type *, struct net_device *);
162 void (*sta_handler)(struct sk_buff *skb);
163 void (*sap_handler)(struct llc_sap *sap, struct sk_buff *skb);
153 164
154 if (!net_eq(dev_net(dev), &init_net)) 165 if (!net_eq(dev_net(dev), &init_net))
155 goto drop; 166 goto drop;
@@ -182,7 +193,8 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
182 */ 193 */
183 rcv = rcu_dereference(sap->rcv_func); 194 rcv = rcu_dereference(sap->rcv_func);
184 dest = llc_pdu_type(skb); 195 dest = llc_pdu_type(skb);
185 if (unlikely(!dest || !llc_type_handlers[dest - 1])) { 196 sap_handler = dest ? ACCESS_ONCE(llc_type_handlers[dest - 1]) : NULL;
197 if (unlikely(!sap_handler)) {
186 if (rcv) 198 if (rcv)
187 rcv(skb, dev, pt, orig_dev); 199 rcv(skb, dev, pt, orig_dev);
188 else 200 else
@@ -193,7 +205,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
193 if (cskb) 205 if (cskb)
194 rcv(cskb, dev, pt, orig_dev); 206 rcv(cskb, dev, pt, orig_dev);
195 } 207 }
196 llc_type_handlers[dest - 1](sap, skb); 208 sap_handler(sap, skb);
197 } 209 }
198 llc_sap_put(sap); 210 llc_sap_put(sap);
199out: 211out:
@@ -202,9 +214,10 @@ drop:
202 kfree_skb(skb); 214 kfree_skb(skb);
203 goto out; 215 goto out;
204handle_station: 216handle_station:
205 if (!llc_station_handler) 217 sta_handler = ACCESS_ONCE(llc_station_handler);
218 if (!sta_handler)
206 goto drop; 219 goto drop;
207 llc_station_handler(skb); 220 sta_handler(skb);
208 goto out; 221 goto out;
209} 222}
210 223
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 6828e39ec2ec..b2f2bac2c2a2 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -687,12 +687,8 @@ static void llc_station_rcv(struct sk_buff *skb)
687 llc_station_state_process(skb); 687 llc_station_state_process(skb);
688} 688}
689 689
690int __init llc_station_init(void) 690void __init llc_station_init(void)
691{ 691{
692 int rc = -ENOBUFS;
693 struct sk_buff *skb;
694 struct llc_station_state_ev *ev;
695
696 skb_queue_head_init(&llc_main_station.mac_pdu_q); 692 skb_queue_head_init(&llc_main_station.mac_pdu_q);
697 skb_queue_head_init(&llc_main_station.ev_q.list); 693 skb_queue_head_init(&llc_main_station.ev_q.list);
698 spin_lock_init(&llc_main_station.ev_q.lock); 694 spin_lock_init(&llc_main_station.ev_q.lock);
@@ -700,23 +696,12 @@ int __init llc_station_init(void)
700 (unsigned long)&llc_main_station); 696 (unsigned long)&llc_main_station);
701 llc_main_station.ack_timer.expires = jiffies + 697 llc_main_station.ack_timer.expires = jiffies +
702 sysctl_llc_station_ack_timeout; 698 sysctl_llc_station_ack_timeout;
703 skb = alloc_skb(0, GFP_ATOMIC);
704 if (!skb)
705 goto out;
706 rc = 0;
707 llc_set_station_handler(llc_station_rcv);
708 ev = llc_station_ev(skb);
709 memset(ev, 0, sizeof(*ev));
710 llc_main_station.maximum_retry = 1; 699 llc_main_station.maximum_retry = 1;
711 llc_main_station.state = LLC_STATION_STATE_DOWN; 700 llc_main_station.state = LLC_STATION_STATE_UP;
712 ev->type = LLC_STATION_EV_TYPE_SIMPLE; 701 llc_set_station_handler(llc_station_rcv);
713 ev->prim_type = LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK;
714 rc = llc_station_next_state(skb);
715out:
716 return rc;
717} 702}
718 703
719void __exit llc_station_exit(void) 704void llc_station_exit(void)
720{ 705{
721 llc_set_station_handler(NULL); 706 llc_set_station_handler(NULL);
722} 707}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index acf712ffb5e6..c5e8c9c31f76 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1811,37 +1811,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1811 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, 1811 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
1812 sdata, NULL, NULL); 1812 sdata, NULL, NULL);
1813 } else { 1813 } else {
1814 int is_mesh_mcast = 1; 1814 /* DS -> MBSS (802.11-2012 13.11.3.3).
1815 const u8 *mesh_da; 1815 * For unicast with unknown forwarding information,
1816 * destination might be in the MBSS or if that fails
1817 * forwarded to another mesh gate. In either case
1818 * resolution will be handled in ieee80211_xmit(), so
1819 * leave the original DA. This also works for mcast */
1820 const u8 *mesh_da = skb->data;
1821
1822 if (mppath)
1823 mesh_da = mppath->mpp;
1824 else if (mpath)
1825 mesh_da = mpath->dst;
1826 rcu_read_unlock();
1816 1827
1817 if (is_multicast_ether_addr(skb->data))
1818 /* DA TA mSA AE:SA */
1819 mesh_da = skb->data;
1820 else {
1821 static const u8 bcast[ETH_ALEN] =
1822 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1823 if (mppath) {
1824 /* RA TA mDA mSA AE:DA SA */
1825 mesh_da = mppath->mpp;
1826 is_mesh_mcast = 0;
1827 } else if (mpath) {
1828 mesh_da = mpath->dst;
1829 is_mesh_mcast = 0;
1830 } else {
1831 /* DA TA mSA AE:SA */
1832 mesh_da = bcast;
1833 }
1834 }
1835 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1828 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1836 mesh_da, sdata->vif.addr); 1829 mesh_da, sdata->vif.addr);
1837 rcu_read_unlock(); 1830 if (is_multicast_ether_addr(mesh_da))
1838 if (is_mesh_mcast) 1831 /* DA TA mSA AE:SA */
1839 meshhdrlen = 1832 meshhdrlen =
1840 ieee80211_new_mesh_header(&mesh_hdr, 1833 ieee80211_new_mesh_header(&mesh_hdr,
1841 sdata, 1834 sdata,
1842 skb->data + ETH_ALEN, 1835 skb->data + ETH_ALEN,
1843 NULL); 1836 NULL);
1844 else 1837 else
1838 /* RA TA mDA mSA AE:DA SA */
1845 meshhdrlen = 1839 meshhdrlen =
1846 ieee80211_new_mesh_header(&mesh_hdr, 1840 ieee80211_new_mesh_header(&mesh_hdr,
1847 sdata, 1841 sdata,
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 84444dda194b..f51013c07b9f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1171 goto out_err; 1171 goto out_err;
1172 } 1172 }
1173 svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); 1173 svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
1174 if (!svc->stats.cpustats) 1174 if (!svc->stats.cpustats) {
1175 ret = -ENOMEM;
1175 goto out_err; 1176 goto out_err;
1177 }
1176 1178
1177 /* I'm the first user of the service */ 1179 /* I'm the first user of the service */
1178 atomic_set(&svc->usecnt, 0); 1180 atomic_set(&svc->usecnt, 0);
@@ -2759,6 +2761,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2759 { 2761 {
2760 struct ip_vs_timeout_user t; 2762 struct ip_vs_timeout_user t;
2761 2763
2764 memset(&t, 0, sizeof(t));
2762 __ip_vs_get_timeouts(net, &t); 2765 __ip_vs_get_timeouts(net, &t);
2763 if (copy_to_user(user, &t, sizeof(t)) != 0) 2766 if (copy_to_user(user, &t, sizeof(t)) != 0)
2764 ret = -EFAULT; 2767 ret = -EFAULT;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index cf4875565d67..2ceec64b19f9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack)
249{ 249{
250 struct nf_conn *ct = (void *)ul_conntrack; 250 struct nf_conn *ct = (void *)ul_conntrack;
251 struct net *net = nf_ct_net(ct); 251 struct net *net = nf_ct_net(ct);
252 struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
253
254 BUG_ON(ecache == NULL);
252 255
253 if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { 256 if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
254 /* bad luck, let's retry again */ 257 /* bad luck, let's retry again */
255 ct->timeout.expires = jiffies + 258 ecache->timeout.expires = jiffies +
256 (random32() % net->ct.sysctl_events_retry_timeout); 259 (random32() % net->ct.sysctl_events_retry_timeout);
257 add_timer(&ct->timeout); 260 add_timer(&ecache->timeout);
258 return; 261 return;
259 } 262 }
260 /* we've got the event delivered, now it's dying */ 263 /* we've got the event delivered, now it's dying */
@@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack)
268void nf_ct_insert_dying_list(struct nf_conn *ct) 271void nf_ct_insert_dying_list(struct nf_conn *ct)
269{ 272{
270 struct net *net = nf_ct_net(ct); 273 struct net *net = nf_ct_net(ct);
274 struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
275
276 BUG_ON(ecache == NULL);
271 277
272 /* add this conntrack to the dying list */ 278 /* add this conntrack to the dying list */
273 spin_lock_bh(&nf_conntrack_lock); 279 spin_lock_bh(&nf_conntrack_lock);
@@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
275 &net->ct.dying); 281 &net->ct.dying);
276 spin_unlock_bh(&nf_conntrack_lock); 282 spin_unlock_bh(&nf_conntrack_lock);
277 /* set a new timer to retry event delivery */ 283 /* set a new timer to retry event delivery */
278 setup_timer(&ct->timeout, death_by_event, (unsigned long)ct); 284 setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
279 ct->timeout.expires = jiffies + 285 ecache->timeout.expires = jiffies +
280 (random32() % net->ct.sysctl_events_retry_timeout); 286 (random32() % net->ct.sysctl_events_retry_timeout);
281 add_timer(&ct->timeout); 287 add_timer(&ecache->timeout);
282} 288}
283EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); 289EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
284 290
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 45cf602a76bc..527651a53a45 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -361,23 +361,6 @@ static void evict_oldest_expect(struct nf_conn *master,
361 } 361 }
362} 362}
363 363
364static inline int refresh_timer(struct nf_conntrack_expect *i)
365{
366 struct nf_conn_help *master_help = nfct_help(i->master);
367 const struct nf_conntrack_expect_policy *p;
368
369 if (!del_timer(&i->timeout))
370 return 0;
371
372 p = &rcu_dereference_protected(
373 master_help->helper,
374 lockdep_is_held(&nf_conntrack_lock)
375 )->expect_policy[i->class];
376 i->timeout.expires = jiffies + p->timeout * HZ;
377 add_timer(&i->timeout);
378 return 1;
379}
380
381static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) 364static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
382{ 365{
383 const struct nf_conntrack_expect_policy *p; 366 const struct nf_conntrack_expect_policy *p;
@@ -386,7 +369,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
386 struct nf_conn_help *master_help = nfct_help(master); 369 struct nf_conn_help *master_help = nfct_help(master);
387 struct nf_conntrack_helper *helper; 370 struct nf_conntrack_helper *helper;
388 struct net *net = nf_ct_exp_net(expect); 371 struct net *net = nf_ct_exp_net(expect);
389 struct hlist_node *n; 372 struct hlist_node *n, *next;
390 unsigned int h; 373 unsigned int h;
391 int ret = 1; 374 int ret = 1;
392 375
@@ -395,12 +378,12 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
395 goto out; 378 goto out;
396 } 379 }
397 h = nf_ct_expect_dst_hash(&expect->tuple); 380 h = nf_ct_expect_dst_hash(&expect->tuple);
398 hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { 381 hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
399 if (expect_matches(i, expect)) { 382 if (expect_matches(i, expect)) {
400 /* Refresh timer: if it's dying, ignore.. */ 383 if (del_timer(&i->timeout)) {
401 if (refresh_timer(i)) { 384 nf_ct_unlink_expect(i);
402 ret = 0; 385 nf_ct_expect_put(i);
403 goto out; 386 break;
404 } 387 }
405 } else if (expect_clash(i, expect)) { 388 } else if (expect_clash(i, expect)) {
406 ret = -EBUSY; 389 ret = -EBUSY;
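
The hunk above switches __nf_ct_expect_check() to the _safe flavour of the hash-chain iterator because a matching expectation may now be unlinked and released while the chain is still being walked. As a rough user-space sketch of why that matters (plain C, not the kernel's hlist macros; delete_matching() and the node layout are invented for illustration), the next pointer has to be cached before the current element can be freed:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

/* Delete every node matching 'key'. The next pointer is cached before the
 * current node may be freed, mirroring what the *_safe iterators do. */
static void delete_matching(struct node **head, int key)
{
	struct node **pprev = head;
	struct node *cur = *head;

	while (cur) {
		struct node *next = cur->next;	/* cache before possible free */

		if (cur->key == key) {
			*pprev = next;		/* unlink */
			free(cur);		/* cur is now invalid */
		} else {
			pprev = &cur->next;
		}
		cur = next;			/* safe: taken from the cache */
	}
}

static struct node *push(struct node *head, int key)
{
	struct node *n = malloc(sizeof(*n));
	n->key = key;
	n->next = head;
	return n;
}

int main(void)
{
	struct node *head = NULL, *n;

	head = push(head, 1);
	head = push(head, 2);
	head = push(head, 1);

	delete_matching(&head, 1);

	for (n = head; n; n = n->next)
		printf("%d\n", n->key);		/* prints only 2 */
	while (head) {
		n = head->next;
		free(head);
		head = n;
	}
	return 0;
}
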
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 14f67a2cbcb5..9807f3278fcb 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1896,10 +1896,15 @@ static int
1896ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct) 1896ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
1897{ 1897{
1898 struct nlattr *cda[CTA_MAX+1]; 1898 struct nlattr *cda[CTA_MAX+1];
1899 int ret;
1899 1900
1900 nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy); 1901 nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);
1901 1902
1902 return ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct); 1903 spin_lock_bh(&nf_conntrack_lock);
1904 ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
1905 spin_unlock_bh(&nf_conntrack_lock);
1906
1907 return ret;
1903} 1908}
1904 1909
1905static struct nfq_ct_hook ctnetlink_nfqueue_hook = { 1910static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
@@ -2785,7 +2790,8 @@ static int __init ctnetlink_init(void)
2785 goto err_unreg_subsys; 2790 goto err_unreg_subsys;
2786 } 2791 }
2787 2792
2788 if (register_pernet_subsys(&ctnetlink_net_ops)) { 2793 ret = register_pernet_subsys(&ctnetlink_net_ops);
2794 if (ret < 0) {
2789 pr_err("ctnetlink_init: cannot register pernet operations\n"); 2795 pr_err("ctnetlink_init: cannot register pernet operations\n");
2790 goto err_unreg_exp_subsys; 2796 goto err_unreg_exp_subsys;
2791 } 2797 }
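
The ctnetlink_init() change above stores the return value of register_pernet_subsys() so the real error code propagates through the existing goto-based unwinding instead of being discarded. A minimal user-space sketch of that idiom (step_a/step_b/undo_a are hypothetical stand-ins, not kernel APIs):

#include <stdio.h>

/* Hypothetical init steps; each returns 0 on success or a negative code. */
static int step_a(void) { return 0; }
static int step_b(void) { return -1; }	/* pretend this one fails */
static void undo_a(void) { puts("undo a"); }

static int init_all(void)
{
	int ret;

	ret = step_a();
	if (ret < 0)
		goto out;

	/* Capture the return value instead of discarding it, so the caller
	 * sees the real error code rather than a generic failure. */
	ret = step_b();
	if (ret < 0)
		goto err_undo_a;

	return 0;

err_undo_a:
	undo_a();
out:
	return ret;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}
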
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 758a1bacc126..5c0a112aeee6 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -183,12 +183,12 @@ static int media_len(const struct nf_conn *ct, const char *dptr,
183 return len + digits_len(ct, dptr, limit, shift); 183 return len + digits_len(ct, dptr, limit, shift);
184} 184}
185 185
186static int parse_addr(const struct nf_conn *ct, const char *cp, 186static int sip_parse_addr(const struct nf_conn *ct, const char *cp,
187 const char **endp, union nf_inet_addr *addr, 187 const char **endp, union nf_inet_addr *addr,
188 const char *limit) 188 const char *limit, bool delim)
189{ 189{
190 const char *end; 190 const char *end;
191 int ret = 0; 191 int ret;
192 192
193 if (!ct) 193 if (!ct)
194 return 0; 194 return 0;
@@ -197,16 +197,28 @@ static int parse_addr(const struct nf_conn *ct, const char *cp,
197 switch (nf_ct_l3num(ct)) { 197 switch (nf_ct_l3num(ct)) {
198 case AF_INET: 198 case AF_INET:
199 ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end); 199 ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end);
200 if (ret == 0)
201 return 0;
200 break; 202 break;
201 case AF_INET6: 203 case AF_INET6:
204 if (cp < limit && *cp == '[')
205 cp++;
206 else if (delim)
207 return 0;
208
202 ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end); 209 ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end);
210 if (ret == 0)
211 return 0;
212
213 if (end < limit && *end == ']')
214 end++;
215 else if (delim)
216 return 0;
203 break; 217 break;
204 default: 218 default:
205 BUG(); 219 BUG();
206 } 220 }
207 221
208 if (ret == 0 || end == cp)
209 return 0;
210 if (endp) 222 if (endp)
211 *endp = end; 223 *endp = end;
212 return 1; 224 return 1;
@@ -219,7 +231,7 @@ static int epaddr_len(const struct nf_conn *ct, const char *dptr,
219 union nf_inet_addr addr; 231 union nf_inet_addr addr;
220 const char *aux = dptr; 232 const char *aux = dptr;
221 233
222 if (!parse_addr(ct, dptr, &dptr, &addr, limit)) { 234 if (!sip_parse_addr(ct, dptr, &dptr, &addr, limit, true)) {
223 pr_debug("ip: %s parse failed.!\n", dptr); 235 pr_debug("ip: %s parse failed.!\n", dptr);
224 return 0; 236 return 0;
225 } 237 }
@@ -296,7 +308,7 @@ int ct_sip_parse_request(const struct nf_conn *ct,
296 return 0; 308 return 0;
297 dptr += shift; 309 dptr += shift;
298 310
299 if (!parse_addr(ct, dptr, &end, addr, limit)) 311 if (!sip_parse_addr(ct, dptr, &end, addr, limit, true))
300 return -1; 312 return -1;
301 if (end < limit && *end == ':') { 313 if (end < limit && *end == ':') {
302 end++; 314 end++;
@@ -550,7 +562,7 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
550 if (ret == 0) 562 if (ret == 0)
551 return ret; 563 return ret;
552 564
553 if (!parse_addr(ct, dptr + *matchoff, &c, addr, limit)) 565 if (!sip_parse_addr(ct, dptr + *matchoff, &c, addr, limit, true))
554 return -1; 566 return -1;
555 if (*c == ':') { 567 if (*c == ':') {
556 c++; 568 c++;
@@ -599,7 +611,7 @@ int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
599 unsigned int dataoff, unsigned int datalen, 611 unsigned int dataoff, unsigned int datalen,
600 const char *name, 612 const char *name,
601 unsigned int *matchoff, unsigned int *matchlen, 613 unsigned int *matchoff, unsigned int *matchlen,
602 union nf_inet_addr *addr) 614 union nf_inet_addr *addr, bool delim)
603{ 615{
604 const char *limit = dptr + datalen; 616 const char *limit = dptr + datalen;
605 const char *start, *end; 617 const char *start, *end;
@@ -613,7 +625,7 @@ int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
613 return 0; 625 return 0;
614 626
615 start += strlen(name); 627 start += strlen(name);
616 if (!parse_addr(ct, start, &end, addr, limit)) 628 if (!sip_parse_addr(ct, start, &end, addr, limit, delim))
617 return 0; 629 return 0;
618 *matchoff = start - dptr; 630 *matchoff = start - dptr;
619 *matchlen = end - start; 631 *matchlen = end - start;
@@ -675,6 +687,47 @@ static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,
675 return 1; 687 return 1;
676} 688}
677 689
690static int sdp_parse_addr(const struct nf_conn *ct, const char *cp,
691 const char **endp, union nf_inet_addr *addr,
692 const char *limit)
693{
694 const char *end;
695 int ret;
696
697 memset(addr, 0, sizeof(*addr));
698 switch (nf_ct_l3num(ct)) {
699 case AF_INET:
700 ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end);
701 break;
702 case AF_INET6:
703 ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end);
704 break;
705 default:
706 BUG();
707 }
708
709 if (ret == 0)
710 return 0;
711 if (endp)
712 *endp = end;
713 return 1;
714}
715
716/* skip ip address. returns its length. */
717static int sdp_addr_len(const struct nf_conn *ct, const char *dptr,
718 const char *limit, int *shift)
719{
720 union nf_inet_addr addr;
721 const char *aux = dptr;
722
723 if (!sdp_parse_addr(ct, dptr, &dptr, &addr, limit)) {
724 pr_debug("ip: %s parse failed.!\n", dptr);
725 return 0;
726 }
727
728 return dptr - aux;
729}
730
678/* SDP header parsing: a SDP session description contains an ordered set of 731/* SDP header parsing: a SDP session description contains an ordered set of
679 * headers, starting with a section containing general session parameters, 732 * headers, starting with a section containing general session parameters,
680 * optionally followed by multiple media descriptions. 733 * optionally followed by multiple media descriptions.
@@ -686,10 +739,10 @@ static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,
686 */ 739 */
687static const struct sip_header ct_sdp_hdrs[] = { 740static const struct sip_header ct_sdp_hdrs[] = {
688 [SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len), 741 [SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len),
689 [SDP_HDR_OWNER_IP4] = SDP_HDR("o=", "IN IP4 ", epaddr_len), 742 [SDP_HDR_OWNER_IP4] = SDP_HDR("o=", "IN IP4 ", sdp_addr_len),
690 [SDP_HDR_CONNECTION_IP4] = SDP_HDR("c=", "IN IP4 ", epaddr_len), 743 [SDP_HDR_CONNECTION_IP4] = SDP_HDR("c=", "IN IP4 ", sdp_addr_len),
691 [SDP_HDR_OWNER_IP6] = SDP_HDR("o=", "IN IP6 ", epaddr_len), 744 [SDP_HDR_OWNER_IP6] = SDP_HDR("o=", "IN IP6 ", sdp_addr_len),
692 [SDP_HDR_CONNECTION_IP6] = SDP_HDR("c=", "IN IP6 ", epaddr_len), 745 [SDP_HDR_CONNECTION_IP6] = SDP_HDR("c=", "IN IP6 ", sdp_addr_len),
693 [SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len), 746 [SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len),
694}; 747};
695 748
@@ -775,8 +828,8 @@ static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr,
775 if (ret <= 0) 828 if (ret <= 0)
776 return ret; 829 return ret;
777 830
778 if (!parse_addr(ct, dptr + *matchoff, NULL, addr, 831 if (!sdp_parse_addr(ct, dptr + *matchoff, NULL, addr,
779 dptr + *matchoff + *matchlen)) 832 dptr + *matchoff + *matchlen))
780 return -1; 833 return -1;
781 return 1; 834 return 1;
782} 835}
@@ -1515,7 +1568,6 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
1515} 1568}
1516 1569
1517static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly; 1570static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly;
1518static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly;
1519 1571
1520static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = { 1572static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = {
1521 [SIP_EXPECT_SIGNALLING] = { 1573 [SIP_EXPECT_SIGNALLING] = {
@@ -1585,9 +1637,9 @@ static int __init nf_conntrack_sip_init(void)
1585 sip[i][j].me = THIS_MODULE; 1637 sip[i][j].me = THIS_MODULE;
1586 1638
1587 if (ports[i] == SIP_PORT) 1639 if (ports[i] == SIP_PORT)
1588 sprintf(sip_names[i][j], "sip"); 1640 sprintf(sip[i][j].name, "sip");
1589 else 1641 else
1590 sprintf(sip_names[i][j], "sip-%u", i); 1642 sprintf(sip[i][j].name, "sip-%u", i);
1591 1643
1592 pr_debug("port #%u: %u\n", i, ports[i]); 1644 pr_debug("port #%u: %u\n", i, ports[i]);
1593 1645
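
The SIP changes above split address parsing: sip_parse_addr(), when the new delim argument is set, only accepts IPv6 literals wrapped in square brackets and strips the brackets around the in6_pton() call, while sdp_parse_addr() keeps the bare-address behaviour for SDP payloads. A small user-space analogue of the bracket handling (parse_addr() here is a hypothetical helper built on inet_pton(), not the kernel code):

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Accept "192.0.2.1" or "[2001:db8::1]"; when 'delim' is set, a bare IPv6
 * address without the surrounding brackets is rejected, roughly mirroring
 * the delim parameter added to sip_parse_addr() above. */
static int parse_addr(const char *cp, int delim)
{
	char buf[INET6_ADDRSTRLEN];
	unsigned char addr[16];
	size_t len;

	if (*cp == '[') {			/* bracketed IPv6 literal */
		const char *end = strchr(cp + 1, ']');

		if (!end)
			return 0;
		len = end - (cp + 1);
		if (len >= sizeof(buf))
			return 0;
		memcpy(buf, cp + 1, len);
		buf[len] = '\0';
		return inet_pton(AF_INET6, buf, addr) == 1;
	}

	if (strchr(cp, ':')) {			/* bare IPv6 */
		if (delim)
			return 0;		/* brackets required here */
		return inet_pton(AF_INET6, cp, addr) == 1;
	}

	return inet_pton(AF_INET, cp, addr) == 1;
}

int main(void)
{
	printf("%d\n", parse_addr("192.0.2.1", 1));	/* 1 */
	printf("%d\n", parse_addr("[2001:db8::1]", 1));	/* 1 */
	printf("%d\n", parse_addr("2001:db8::1", 1));	/* 0: brackets required */
	printf("%d\n", parse_addr("2001:db8::1", 0));	/* 1 */
	return 0;
}
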
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 169ab59ed9d4..14e2f3903142 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -480,7 +480,7 @@ __build_packet_message(struct nfulnl_instance *inst,
480 } 480 }
481 481
482 if (indev && skb_mac_header_was_set(skb)) { 482 if (indev && skb_mac_header_was_set(skb)) {
483 if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || 483 if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
484 nla_put_be16(inst->skb, NFULA_HWLEN, 484 nla_put_be16(inst->skb, NFULA_HWLEN,
485 htons(skb->dev->hard_header_len)) || 485 htons(skb->dev->hard_header_len)) ||
486 nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, 486 nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
@@ -996,8 +996,10 @@ static int __init nfnetlink_log_init(void)
996 996
997#ifdef CONFIG_PROC_FS 997#ifdef CONFIG_PROC_FS
998 if (!proc_create("nfnetlink_log", 0440, 998 if (!proc_create("nfnetlink_log", 0440,
999 proc_net_netfilter, &nful_file_ops)) 999 proc_net_netfilter, &nful_file_ops)) {
1000 status = -ENOMEM;
1000 goto cleanup_logger; 1001 goto cleanup_logger;
1002 }
1001#endif 1003#endif
1002 return status; 1004 return status;
1003 1005
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 5463969da45b..527023823b5c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1362,7 +1362,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1362 if (NULL == siocb->scm) 1362 if (NULL == siocb->scm)
1363 siocb->scm = &scm; 1363 siocb->scm = &scm;
1364 1364
1365 err = scm_send(sock, msg, siocb->scm); 1365 err = scm_send(sock, msg, siocb->scm, true);
1366 if (err < 0) 1366 if (err < 0)
1367 return err; 1367 return err;
1368 1368
@@ -1373,7 +1373,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1373 dst_pid = addr->nl_pid; 1373 dst_pid = addr->nl_pid;
1374 dst_group = ffs(addr->nl_groups); 1374 dst_group = ffs(addr->nl_groups);
1375 err = -EPERM; 1375 err = -EPERM;
1376 if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) 1376 if ((dst_group || dst_pid) &&
1377 !netlink_capable(sock, NL_NONROOT_SEND))
1377 goto out; 1378 goto out;
1378 } else { 1379 } else {
1379 dst_pid = nlk->dst_pid; 1380 dst_pid = nlk->dst_pid;
@@ -2147,6 +2148,7 @@ static void __init netlink_add_usersock_entry(void)
2147 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); 2148 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
2148 nl_table[NETLINK_USERSOCK].module = THIS_MODULE; 2149 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
2149 nl_table[NETLINK_USERSOCK].registered = 1; 2150 nl_table[NETLINK_USERSOCK].registered = 1;
2151 nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
2150 2152
2151 netlink_table_ungrab(); 2153 netlink_table_ungrab();
2152} 2154}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8ac890a1a4c0..c5c9e2a54218 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1273,6 +1273,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1273 spin_unlock(&f->lock); 1273 spin_unlock(&f->lock);
1274} 1274}
1275 1275
1276static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
1277{
1278 if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
1279 return true;
1280
1281 return false;
1282}
1283
1276static int fanout_add(struct sock *sk, u16 id, u16 type_flags) 1284static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1277{ 1285{
1278 struct packet_sock *po = pkt_sk(sk); 1286 struct packet_sock *po = pkt_sk(sk);
@@ -1325,6 +1333,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1325 match->prot_hook.dev = po->prot_hook.dev; 1333 match->prot_hook.dev = po->prot_hook.dev;
1326 match->prot_hook.func = packet_rcv_fanout; 1334 match->prot_hook.func = packet_rcv_fanout;
1327 match->prot_hook.af_packet_priv = match; 1335 match->prot_hook.af_packet_priv = match;
1336 match->prot_hook.id_match = match_fanout_group;
1328 dev_add_pack(&match->prot_hook); 1337 dev_add_pack(&match->prot_hook);
1329 list_add(&match->list, &fanout_list); 1338 list_add(&match->list, &fanout_list);
1330 } 1339 }
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index fe81cc18e9e0..9c0fd0c78814 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -200,13 +200,12 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
200out: 200out:
201 if (err) { 201 if (err) {
202 m->tcf_qstats.overlimits++; 202 m->tcf_qstats.overlimits++;
203 /* should we be asking for packet to be dropped? 203 if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
204 * may make sense for redirect case only 204 retval = TC_ACT_SHOT;
205 */ 205 else
206 retval = TC_ACT_SHOT; 206 retval = m->tcf_action;
207 } else { 207 } else
208 retval = m->tcf_action; 208 retval = m->tcf_action;
209 }
210 spin_unlock(&m->tcf_lock); 209 spin_unlock(&m->tcf_lock);
211 210
212 return retval; 211 return retval;
diff --git a/net/socket.c b/net/socket.c
index dfe5b66c97e0..a5471f804d99 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2657,6 +2657,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
2657 if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) 2657 if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
2658 return -EFAULT; 2658 return -EFAULT;
2659 2659
2660 memset(&ifc, 0, sizeof(ifc));
2660 if (ifc32.ifcbuf == 0) { 2661 if (ifc32.ifcbuf == 0) {
2661 ifc32.ifc_len = 0; 2662 ifc32.ifc_len = 0;
2662 ifc.ifc_len = 0; 2663 ifc.ifc_len = 0;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 88f2bf671960..bac973a31367 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -316,7 +316,6 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
316 */ 316 */
317void svc_xprt_enqueue(struct svc_xprt *xprt) 317void svc_xprt_enqueue(struct svc_xprt *xprt)
318{ 318{
319 struct svc_serv *serv = xprt->xpt_server;
320 struct svc_pool *pool; 319 struct svc_pool *pool;
321 struct svc_rqst *rqstp; 320 struct svc_rqst *rqstp;
322 int cpu; 321 int cpu;
@@ -362,8 +361,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
362 rqstp, rqstp->rq_xprt); 361 rqstp, rqstp->rq_xprt);
363 rqstp->rq_xprt = xprt; 362 rqstp->rq_xprt = xprt;
364 svc_xprt_get(xprt); 363 svc_xprt_get(xprt);
365 rqstp->rq_reserved = serv->sv_max_mesg;
366 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
367 pool->sp_stats.threads_woken++; 364 pool->sp_stats.threads_woken++;
368 wake_up(&rqstp->rq_wait); 365 wake_up(&rqstp->rq_wait);
369 } else { 366 } else {
@@ -640,8 +637,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
640 if (xprt) { 637 if (xprt) {
641 rqstp->rq_xprt = xprt; 638 rqstp->rq_xprt = xprt;
642 svc_xprt_get(xprt); 639 svc_xprt_get(xprt);
643 rqstp->rq_reserved = serv->sv_max_mesg;
644 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
645 640
646 /* As there is a shortage of threads and this request 641 /* As there is a shortage of threads and this request
647 * had to be queued, don't allow the thread to wait so 642 * had to be queued, don't allow the thread to wait so
@@ -738,6 +733,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
738 else 733 else
739 len = xprt->xpt_ops->xpo_recvfrom(rqstp); 734 len = xprt->xpt_ops->xpo_recvfrom(rqstp);
740 dprintk("svc: got len=%d\n", len); 735 dprintk("svc: got len=%d\n", len);
736 rqstp->rq_reserved = serv->sv_max_mesg;
737 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
741 } 738 }
742 svc_xprt_received(xprt); 739 svc_xprt_received(xprt);
743 740
@@ -794,7 +791,8 @@ int svc_send(struct svc_rqst *rqstp)
794 791
795 /* Grab mutex to serialize outgoing data. */ 792 /* Grab mutex to serialize outgoing data. */
796 mutex_lock(&xprt->xpt_mutex); 793 mutex_lock(&xprt->xpt_mutex);
797 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) 794 if (test_bit(XPT_DEAD, &xprt->xpt_flags)
795 || test_bit(XPT_CLOSE, &xprt->xpt_flags))
798 len = -ENOTCONN; 796 len = -ENOTCONN;
799 else 797 else
800 len = xprt->xpt_ops->xpo_sendto(rqstp); 798 len = xprt->xpt_ops->xpo_sendto(rqstp);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 18bc130255a7..998aa8c1807c 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1129,9 +1129,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
1129 if (len >= 0) 1129 if (len >= 0)
1130 svsk->sk_tcplen += len; 1130 svsk->sk_tcplen += len;
1131 if (len != want) { 1131 if (len != want) {
1132 svc_tcp_save_pages(svsk, rqstp);
1132 if (len < 0 && len != -EAGAIN) 1133 if (len < 0 && len != -EAGAIN)
1133 goto err_other; 1134 goto err_other;
1134 svc_tcp_save_pages(svsk, rqstp);
1135 dprintk("svc: incomplete TCP record (%d of %d)\n", 1135 dprintk("svc: incomplete TCP record (%d of %d)\n",
1136 svsk->sk_tcplen, svsk->sk_reclen); 1136 svsk->sk_tcplen, svsk->sk_reclen);
1137 goto err_noclose; 1137 goto err_noclose;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e4768c180da2..c5ee4ff61364 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1450,7 +1450,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1450 if (NULL == siocb->scm) 1450 if (NULL == siocb->scm)
1451 siocb->scm = &tmp_scm; 1451 siocb->scm = &tmp_scm;
1452 wait_for_unix_gc(); 1452 wait_for_unix_gc();
1453 err = scm_send(sock, msg, siocb->scm); 1453 err = scm_send(sock, msg, siocb->scm, false);
1454 if (err < 0) 1454 if (err < 0)
1455 return err; 1455 return err;
1456 1456
@@ -1619,7 +1619,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1619 if (NULL == siocb->scm) 1619 if (NULL == siocb->scm)
1620 siocb->scm = &tmp_scm; 1620 siocb->scm = &tmp_scm;
1621 wait_for_unix_gc(); 1621 wait_for_unix_gc();
1622 err = scm_send(sock, msg, siocb->scm); 1622 err = scm_send(sock, msg, siocb->scm, false);
1623 if (err < 0) 1623 if (err < 0)
1624 return err; 1624 return err;
1625 1625
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c5a5165a5927..5a2aa17e4d3c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1357,6 +1357,8 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1357 1357
1358 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst)); 1358 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1359 xdst->flo.ops = &xfrm_bundle_fc_ops; 1359 xdst->flo.ops = &xfrm_bundle_fc_ops;
1360 if (afinfo->init_dst)
1361 afinfo->init_dst(net, xdst);
1360 } else 1362 } else
1361 xdst = ERR_PTR(-ENOBUFS); 1363 xdst = ERR_PTR(-ENOBUFS);
1362 1364
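
The xfrm_alloc_dst() hunk above calls the per-family init_dst() hook only when the address-family ops actually provide one. A toy sketch of that optional-callback pattern (the family_ops table and its names are invented for illustration):

#include <stdio.h>
#include <stddef.h>

/* An ops table with an optional hook that is invoked only when present,
 * mirroring the "if (afinfo->init_dst)" guard above. */
struct family_ops {
	const char *name;
	void (*init_dst)(int id);	/* optional; may be NULL */
};

static void ipv4_init_dst(int id)
{
	printf("init_dst(%d) for ipv4\n", id);
}

static const struct family_ops ops_tbl[] = {
	{ .name = "ipv4", .init_dst = ipv4_init_dst },
	{ .name = "ipv6", .init_dst = NULL },		/* hook not provided */
};

static void alloc_dst(const struct family_ops *ops, int id)
{
	printf("alloc dst for %s\n", ops->name);
	if (ops->init_dst)			/* only call the hook if set */
		ops->init_dst(id);
}

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(ops_tbl) / sizeof(ops_tbl[0]); i++)
		alloc_dst(&ops_tbl[i], (int)i);
	return 0;
}
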
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 87cd0e4d4282..210be48d8ae3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1994,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
1994 goto error; 1994 goto error;
1995 1995
1996 x->outer_mode = xfrm_get_mode(x->props.mode, family); 1996 x->outer_mode = xfrm_get_mode(x->props.mode, family);
1997 if (x->outer_mode == NULL) 1997 if (x->outer_mode == NULL) {
1998 err = -EPROTONOSUPPORT;
1998 goto error; 1999 goto error;
2000 }
1999 2001
2000 if (init_replay) { 2002 if (init_replay) {
2001 err = xfrm_init_replay(x); 2003 err = xfrm_init_replay(x);
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 913d6bdfdda3..ca05ba217f5f 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3016,7 +3016,8 @@ sub process {
3016 $herectx .= raw_line($linenr, $n) . "\n"; 3016 $herectx .= raw_line($linenr, $n) . "\n";
3017 } 3017 }
3018 3018
3019 if (($stmts =~ tr/;/;/) == 1) { 3019 if (($stmts =~ tr/;/;/) == 1 &&
3020 $stmts !~ /^\s*(if|while|for|switch)\b/) {
3020 WARN("SINGLE_STATEMENT_DO_WHILE_MACRO", 3021 WARN("SINGLE_STATEMENT_DO_WHILE_MACRO",
3021 "Single statement macros should not use a do {} while (0) loop\n" . "$herectx"); 3022 "Single statement macros should not use a do {} while (0) loop\n" . "$herectx");
3022 } 3023 }
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 0d7b25e81643..4e1fda75c1c9 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -106,7 +106,7 @@ static struct pxa2xx_pcm_client pxa2xx_ac97_pcm_client = {
106 .prepare = pxa2xx_ac97_pcm_prepare, 106 .prepare = pxa2xx_ac97_pcm_prepare,
107}; 107};
108 108
109#ifdef CONFIG_PM 109#ifdef CONFIG_PM_SLEEP
110 110
111static int pxa2xx_ac97_do_suspend(struct snd_card *card) 111static int pxa2xx_ac97_do_suspend(struct snd_card *card)
112{ 112{
@@ -243,7 +243,7 @@ static struct platform_driver pxa2xx_ac97_driver = {
243 .driver = { 243 .driver = {
244 .name = "pxa2xx-ac97", 244 .name = "pxa2xx-ac97",
245 .owner = THIS_MODULE, 245 .owner = THIS_MODULE,
246#ifdef CONFIG_PM 246#ifdef CONFIG_PM_SLEEP
247 .pm = &pxa2xx_ac97_pm_ops, 247 .pm = &pxa2xx_ac97_pm_ops,
248#endif 248#endif
249 }, 249 },
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index eb4ceb71123e..277ebce23a45 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -452,6 +452,7 @@ static int __devinit atmel_abdac_probe(struct platform_device *pdev)
452 dac->regs = ioremap(regs->start, resource_size(regs)); 452 dac->regs = ioremap(regs->start, resource_size(regs));
453 if (!dac->regs) { 453 if (!dac->regs) {
454 dev_dbg(&pdev->dev, "could not remap register memory\n"); 454 dev_dbg(&pdev->dev, "could not remap register memory\n");
455 retval = -ENOMEM;
455 goto out_free_card; 456 goto out_free_card;
456 } 457 }
457 458
@@ -534,7 +535,7 @@ out_put_pclk:
534 return retval; 535 return retval;
535} 536}
536 537
537#ifdef CONFIG_PM 538#ifdef CONFIG_PM_SLEEP
538static int atmel_abdac_suspend(struct device *pdev) 539static int atmel_abdac_suspend(struct device *pdev)
539{ 540{
540 struct snd_card *card = dev_get_drvdata(pdev); 541 struct snd_card *card = dev_get_drvdata(pdev);
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index bf47025bdf45..9052aff37f64 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -278,14 +278,9 @@ static int atmel_ac97c_capture_hw_params(struct snd_pcm_substream *substream,
278 if (retval < 0) 278 if (retval < 0)
279 return retval; 279 return retval;
280 /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */ 280 /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
281 if (cpu_is_at32ap7000()) { 281 if (cpu_is_at32ap7000() && retval == 1)
282 if (retval < 0) 282 if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
283 return retval; 283 dw_dma_cyclic_free(chip->dma.rx_chan);
284 /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
285 if (retval == 1)
286 if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
287 dw_dma_cyclic_free(chip->dma.rx_chan);
288 }
289 284
290 /* Set restrictions to params. */ 285 /* Set restrictions to params. */
291 mutex_lock(&opened_mutex); 286 mutex_lock(&opened_mutex);
@@ -980,6 +975,7 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev)
980 975
981 if (!chip->regs) { 976 if (!chip->regs) {
982 dev_dbg(&pdev->dev, "could not remap register memory\n"); 977 dev_dbg(&pdev->dev, "could not remap register memory\n");
978 retval = -ENOMEM;
983 goto err_ioremap; 979 goto err_ioremap;
984 } 980 }
985 981
@@ -1134,7 +1130,7 @@ err_snd_card_new:
1134 return retval; 1130 return retval;
1135} 1131}
1136 1132
1137#ifdef CONFIG_PM 1133#ifdef CONFIG_PM_SLEEP
1138static int atmel_ac97c_suspend(struct device *pdev) 1134static int atmel_ac97c_suspend(struct device *pdev)
1139{ 1135{
1140 struct snd_card *card = dev_get_drvdata(pdev); 1136 struct snd_card *card = dev_get_drvdata(pdev);
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 1128b35b2b05..5a34355e78e8 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -1176,7 +1176,7 @@ static int __devexit loopback_remove(struct platform_device *devptr)
1176 return 0; 1176 return 0;
1177} 1177}
1178 1178
1179#ifdef CONFIG_PM 1179#ifdef CONFIG_PM_SLEEP
1180static int loopback_suspend(struct device *pdev) 1180static int loopback_suspend(struct device *pdev)
1181{ 1181{
1182 struct snd_card *card = dev_get_drvdata(pdev); 1182 struct snd_card *card = dev_get_drvdata(pdev);
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index f7d3bfc6bca8..54bb6644a598 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -1064,7 +1064,7 @@ static int __devexit snd_dummy_remove(struct platform_device *devptr)
1064 return 0; 1064 return 0;
1065} 1065}
1066 1066
1067#ifdef CONFIG_PM 1067#ifdef CONFIG_PM_SLEEP
1068static int snd_dummy_suspend(struct device *pdev) 1068static int snd_dummy_suspend(struct device *pdev)
1069{ 1069{
1070 struct snd_card *card = dev_get_drvdata(pdev); 1070 struct snd_card *card = dev_get_drvdata(pdev);
diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
index 6ca59fc6dcb9..ef171295f6d4 100644
--- a/sound/drivers/pcsp/pcsp.c
+++ b/sound/drivers/pcsp/pcsp.c
@@ -199,7 +199,7 @@ static void pcsp_stop_beep(struct snd_pcsp *chip)
199 pcspkr_stop_sound(); 199 pcspkr_stop_sound();
200} 200}
201 201
202#ifdef CONFIG_PM 202#ifdef CONFIG_PM_SLEEP
203static int pcsp_suspend(struct device *dev) 203static int pcsp_suspend(struct device *dev)
204{ 204{
205 struct snd_pcsp *chip = dev_get_drvdata(dev); 205 struct snd_pcsp *chip = dev_get_drvdata(dev);
@@ -212,7 +212,7 @@ static SIMPLE_DEV_PM_OPS(pcsp_pm, pcsp_suspend, NULL);
212#define PCSP_PM_OPS &pcsp_pm 212#define PCSP_PM_OPS &pcsp_pm
213#else 213#else
214#define PCSP_PM_OPS NULL 214#define PCSP_PM_OPS NULL
215#endif /* CONFIG_PM */ 215#endif /* CONFIG_PM_SLEEP */
216 216
217static void pcsp_shutdown(struct platform_device *dev) 217static void pcsp_shutdown(struct platform_device *dev)
218{ 218{
diff --git a/sound/isa/als100.c b/sound/isa/als100.c
index 2d67c78c9f4b..f7cdaf51512d 100644
--- a/sound/isa/als100.c
+++ b/sound/isa/als100.c
@@ -233,7 +233,7 @@ static int __devinit snd_card_als100_probe(int dev,
233 irq[dev], dma8[dev], dma16[dev]); 233 irq[dev], dma8[dev], dma16[dev]);
234 } 234 }
235 235
236 if ((error = snd_sb16dsp_pcm(chip, 0, NULL)) < 0) { 236 if ((error = snd_sb16dsp_pcm(chip, 0, &chip->pcm)) < 0) {
237 snd_card_free(card); 237 snd_card_free(card);
238 return error; 238 return error;
239 } 239 }
diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
index 733b014ec7d1..b2b3c014221a 100644
--- a/sound/oss/sb_audio.c
+++ b/sound/oss/sb_audio.c
@@ -575,13 +575,15 @@ static int jazz16_audio_set_speed(int dev, int speed)
575 if (speed > 0) 575 if (speed > 0)
576 { 576 {
577 int tmp; 577 int tmp;
578 int s = speed * devc->channels; 578 int s;
579 579
580 if (speed < 5000) 580 if (speed < 5000)
581 speed = 5000; 581 speed = 5000;
582 if (speed > 44100) 582 if (speed > 44100)
583 speed = 44100; 583 speed = 44100;
584 584
585 s = speed * devc->channels;
586
585 devc->tconst = (256 - ((1000000 + s / 2) / s)) & 0xff; 587 devc->tconst = (256 - ((1000000 + s / 2) / s)) & 0xff;
586 588
587 tmp = 256 - devc->tconst; 589 tmp = 256 - devc->tconst;
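
The jazz16_audio_set_speed() fix above defers computing s = speed * channels until after speed has been clamped to the 5000-44100 range, so the timing constant is derived from the value actually programmed. A stand-alone sketch of the corrected ordering (time_constant() is an illustrative reconstruction, not the driver function):

#include <stdio.h>

/* The derived value must be computed after its input has been clamped;
 * computing it first would capture the unclamped speed, which is the bug
 * the hunk above fixes by moving the multiplication below the clamps. */
static int time_constant(int speed, int channels)
{
	int s;

	if (speed < 5000)
		speed = 5000;
	if (speed > 44100)
		speed = 44100;

	s = speed * channels;		/* derived from the clamped value */
	return (256 - ((1000000 + s / 2) / s)) & 0xff;
}

int main(void)
{
	/* A request of 96000 Hz is clamped to 44100 before the divisor is built. */
	printf("tconst = %d\n", time_constant(96000, 2));
	return 0;
}
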
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index f75f5ffdfdfb..a71d1c14a0f6 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -94,7 +94,7 @@ static unsigned short snd_cs46xx_codec_read(struct snd_cs46xx *chip,
94 94
95 if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX && 95 if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX &&
96 codec_index != CS46XX_SECONDARY_CODEC_INDEX)) 96 codec_index != CS46XX_SECONDARY_CODEC_INDEX))
97 return -EINVAL; 97 return 0xffff;
98 98
99 chip->active_ctrl(chip, 1); 99 chip->active_ctrl(chip, 1);
100 100
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index 8e40262d4117..2f6e9c762d3f 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -1725,8 +1725,10 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
1725 atc_connect_resources(atc); 1725 atc_connect_resources(atc);
1726 1726
1727 atc->timer = ct_timer_new(atc); 1727 atc->timer = ct_timer_new(atc);
1728 if (!atc->timer) 1728 if (!atc->timer) {
1729 err = -ENOMEM;
1729 goto error1; 1730 goto error1;
1731 }
1730 1732
1731 err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, atc, &ops); 1733 err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, atc, &ops);
1732 if (err < 0) 1734 if (err < 0)
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index 0bc2315b181d..0849aac449f2 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -231,16 +231,22 @@ void snd_hda_detach_beep_device(struct hda_codec *codec)
231} 231}
232EXPORT_SYMBOL_HDA(snd_hda_detach_beep_device); 232EXPORT_SYMBOL_HDA(snd_hda_detach_beep_device);
233 233
234static bool ctl_has_mute(struct snd_kcontrol *kcontrol)
235{
236 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
237 return query_amp_caps(codec, get_amp_nid(kcontrol),
238 get_amp_direction(kcontrol)) & AC_AMPCAP_MUTE;
239}
240
234/* get/put callbacks for beep mute mixer switches */ 241/* get/put callbacks for beep mute mixer switches */
235int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol, 242int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol,
236 struct snd_ctl_elem_value *ucontrol) 243 struct snd_ctl_elem_value *ucontrol)
237{ 244{
238 struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 245 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
239 struct hda_beep *beep = codec->beep; 246 struct hda_beep *beep = codec->beep;
240 if (beep) { 247 if (beep && (!beep->enabled || !ctl_has_mute(kcontrol))) {
241 ucontrol->value.integer.value[0] = 248 ucontrol->value.integer.value[0] =
242 ucontrol->value.integer.value[1] = 249 ucontrol->value.integer.value[1] = beep->enabled;
243 beep->enabled;
244 return 0; 250 return 0;
245 } 251 }
246 return snd_hda_mixer_amp_switch_get(kcontrol, ucontrol); 252 return snd_hda_mixer_amp_switch_get(kcontrol, ucontrol);
@@ -252,9 +258,20 @@ int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol,
252{ 258{
253 struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 259 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
254 struct hda_beep *beep = codec->beep; 260 struct hda_beep *beep = codec->beep;
255 if (beep) 261 if (beep) {
256 snd_hda_enable_beep_device(codec, 262 u8 chs = get_amp_channels(kcontrol);
257 *ucontrol->value.integer.value); 263 int enable = 0;
264 long *valp = ucontrol->value.integer.value;
265 if (chs & 1) {
266 enable |= *valp;
267 valp++;
268 }
269 if (chs & 2)
270 enable |= *valp;
271 snd_hda_enable_beep_device(codec, enable);
272 }
273 if (!ctl_has_mute(kcontrol))
274 return 0;
258 return snd_hda_mixer_amp_switch_put(kcontrol, ucontrol); 275 return snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
259} 276}
260EXPORT_SYMBOL_HDA(snd_hda_mixer_amp_switch_put_beep); 277EXPORT_SYMBOL_HDA(snd_hda_mixer_amp_switch_put_beep);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 88a9c20eb7a2..f560051a949e 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1386,6 +1386,44 @@ int snd_hda_codec_configure(struct hda_codec *codec)
1386} 1386}
1387EXPORT_SYMBOL_HDA(snd_hda_codec_configure); 1387EXPORT_SYMBOL_HDA(snd_hda_codec_configure);
1388 1388
1389/* update the stream-id if changed */
1390static void update_pcm_stream_id(struct hda_codec *codec,
1391 struct hda_cvt_setup *p, hda_nid_t nid,
1392 u32 stream_tag, int channel_id)
1393{
1394 unsigned int oldval, newval;
1395
1396 if (p->stream_tag != stream_tag || p->channel_id != channel_id) {
1397 oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
1398 newval = (stream_tag << 4) | channel_id;
1399 if (oldval != newval)
1400 snd_hda_codec_write(codec, nid, 0,
1401 AC_VERB_SET_CHANNEL_STREAMID,
1402 newval);
1403 p->stream_tag = stream_tag;
1404 p->channel_id = channel_id;
1405 }
1406}
1407
1408/* update the format-id if changed */
1409static void update_pcm_format(struct hda_codec *codec, struct hda_cvt_setup *p,
1410 hda_nid_t nid, int format)
1411{
1412 unsigned int oldval;
1413
1414 if (p->format_id != format) {
1415 oldval = snd_hda_codec_read(codec, nid, 0,
1416 AC_VERB_GET_STREAM_FORMAT, 0);
1417 if (oldval != format) {
1418 msleep(1);
1419 snd_hda_codec_write(codec, nid, 0,
1420 AC_VERB_SET_STREAM_FORMAT,
1421 format);
1422 }
1423 p->format_id = format;
1424 }
1425}
1426
1389/** 1427/**
1390 * snd_hda_codec_setup_stream - set up the codec for streaming 1428 * snd_hda_codec_setup_stream - set up the codec for streaming
1391 * @codec: the CODEC to set up 1429 * @codec: the CODEC to set up
@@ -1400,7 +1438,6 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
1400{ 1438{
1401 struct hda_codec *c; 1439 struct hda_codec *c;
1402 struct hda_cvt_setup *p; 1440 struct hda_cvt_setup *p;
1403 unsigned int oldval, newval;
1404 int type; 1441 int type;
1405 int i; 1442 int i;
1406 1443
@@ -1413,29 +1450,13 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
1413 p = get_hda_cvt_setup(codec, nid); 1450 p = get_hda_cvt_setup(codec, nid);
1414 if (!p) 1451 if (!p)
1415 return; 1452 return;
1416 /* update the stream-id if changed */ 1453
1417 if (p->stream_tag != stream_tag || p->channel_id != channel_id) { 1454 if (codec->pcm_format_first)
1418 oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); 1455 update_pcm_format(codec, p, nid, format);
1419 newval = (stream_tag << 4) | channel_id; 1456 update_pcm_stream_id(codec, p, nid, stream_tag, channel_id);
1420 if (oldval != newval) 1457 if (!codec->pcm_format_first)
1421 snd_hda_codec_write(codec, nid, 0, 1458 update_pcm_format(codec, p, nid, format);
1422 AC_VERB_SET_CHANNEL_STREAMID, 1459
1423 newval);
1424 p->stream_tag = stream_tag;
1425 p->channel_id = channel_id;
1426 }
1427 /* update the format-id if changed */
1428 if (p->format_id != format) {
1429 oldval = snd_hda_codec_read(codec, nid, 0,
1430 AC_VERB_GET_STREAM_FORMAT, 0);
1431 if (oldval != format) {
1432 msleep(1);
1433 snd_hda_codec_write(codec, nid, 0,
1434 AC_VERB_SET_STREAM_FORMAT,
1435 format);
1436 }
1437 p->format_id = format;
1438 }
1439 p->active = 1; 1460 p->active = 1;
1440 p->dirty = 0; 1461 p->dirty = 0;
1441 1462
@@ -3497,7 +3518,7 @@ static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec, hda_nid_t fg
3497{ 3518{
3498 int sup = snd_hda_param_read(codec, fg, AC_PAR_POWER_STATE); 3519 int sup = snd_hda_param_read(codec, fg, AC_PAR_POWER_STATE);
3499 3520
3500 if (sup < 0) 3521 if (sup == -1)
3501 return false; 3522 return false;
3502 if (sup & power_state) 3523 if (sup & power_state)
3503 return true; 3524 return true;
@@ -4433,6 +4454,8 @@ static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down)
4433 * then there is no need to go through power up here. 4454 * then there is no need to go through power up here.
4434 */ 4455 */
4435 if (codec->power_on) { 4456 if (codec->power_on) {
4457 if (codec->power_transition < 0)
4458 codec->power_transition = 0;
4436 spin_unlock(&codec->power_lock); 4459 spin_unlock(&codec->power_lock);
4437 return; 4460 return;
4438 } 4461 }
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index c422d330ca54..7fbc1bcaf1a9 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -861,6 +861,7 @@ struct hda_codec {
861 unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */ 861 unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
862 unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */ 862 unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */
863 unsigned int no_jack_detect:1; /* Machine has no jack-detection */ 863 unsigned int no_jack_detect:1; /* Machine has no jack-detection */
864 unsigned int pcm_format_first:1; /* PCM format must be set first */
864#ifdef CONFIG_SND_HDA_POWER_SAVE 865#ifdef CONFIG_SND_HDA_POWER_SAVE
865 unsigned int power_on :1; /* current (global) power-state */ 866 unsigned int power_on :1; /* current (global) power-state */
866 int power_transition; /* power-state in transition */ 867 int power_transition; /* power-state in transition */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c8aced182fd1..60882c62f180 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -151,6 +151,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
151 "{Intel, CPT}," 151 "{Intel, CPT},"
152 "{Intel, PPT}," 152 "{Intel, PPT},"
153 "{Intel, LPT}," 153 "{Intel, LPT},"
154 "{Intel, LPT_LP},"
154 "{Intel, HPT}," 155 "{Intel, HPT},"
155 "{Intel, PBG}," 156 "{Intel, PBG},"
156 "{Intel, SCH}," 157 "{Intel, SCH},"
@@ -3270,6 +3271,14 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
3270 { PCI_DEVICE(0x8086, 0x8c20), 3271 { PCI_DEVICE(0x8086, 0x8c20),
3271 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | 3272 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
3272 AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO }, 3273 AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },
3274 /* Lynx Point-LP */
3275 { PCI_DEVICE(0x8086, 0x9c20),
3276 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
3277 AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },
3278 /* Lynx Point-LP */
3279 { PCI_DEVICE(0x8086, 0x9c21),
3280 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
3281 AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },
3273 /* Haswell */ 3282 /* Haswell */
3274 { PCI_DEVICE(0x8086, 0x0c0c), 3283 { PCI_DEVICE(0x8086, 0x0c0c),
3275 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | 3284 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 7e46258fc700..6894ec66258c 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -412,7 +412,7 @@ static void print_digital_conv(struct snd_info_buffer *buffer,
412 if (digi1 & AC_DIG1_EMPHASIS) 412 if (digi1 & AC_DIG1_EMPHASIS)
413 snd_iprintf(buffer, " Preemphasis"); 413 snd_iprintf(buffer, " Preemphasis");
414 if (digi1 & AC_DIG1_COPYRIGHT) 414 if (digi1 & AC_DIG1_COPYRIGHT)
415 snd_iprintf(buffer, " Copyright"); 415 snd_iprintf(buffer, " Non-Copyright");
416 if (digi1 & AC_DIG1_NONAUDIO) 416 if (digi1 & AC_DIG1_NONAUDIO)
417 snd_iprintf(buffer, " Non-Audio"); 417 snd_iprintf(buffer, " Non-Audio");
418 if (digi1 & AC_DIG1_PROFESSIONAL) 418 if (digi1 & AC_DIG1_PROFESSIONAL)
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index d0d3540e39e7..49750a96d649 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -246,7 +246,7 @@ static void init_output(struct hda_codec *codec, hda_nid_t pin, hda_nid_t dac)
246 AC_VERB_SET_AMP_GAIN_MUTE, 246 AC_VERB_SET_AMP_GAIN_MUTE,
247 AMP_OUT_UNMUTE); 247 AMP_OUT_UNMUTE);
248 } 248 }
249 if (dac) 249 if (dac && (get_wcaps(codec, dac) & AC_WCAP_OUT_AMP))
250 snd_hda_codec_write(codec, dac, 0, 250 snd_hda_codec_write(codec, dac, 0,
251 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO); 251 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO);
252} 252}
@@ -261,7 +261,7 @@ static void init_input(struct hda_codec *codec, hda_nid_t pin, hda_nid_t adc)
261 AC_VERB_SET_AMP_GAIN_MUTE, 261 AC_VERB_SET_AMP_GAIN_MUTE,
262 AMP_IN_UNMUTE(0)); 262 AMP_IN_UNMUTE(0));
263 } 263 }
264 if (adc) 264 if (adc && (get_wcaps(codec, adc) & AC_WCAP_IN_AMP))
265 snd_hda_codec_write(codec, adc, 0, AC_VERB_SET_AMP_GAIN_MUTE, 265 snd_hda_codec_write(codec, adc, 0, AC_VERB_SET_AMP_GAIN_MUTE,
266 AMP_IN_UNMUTE(0)); 266 AMP_IN_UNMUTE(0));
267} 267}
@@ -275,6 +275,10 @@ static int _add_switch(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
275 int type = dir ? HDA_INPUT : HDA_OUTPUT; 275 int type = dir ? HDA_INPUT : HDA_OUTPUT;
276 struct snd_kcontrol_new knew = 276 struct snd_kcontrol_new knew =
277 HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type); 277 HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type);
278 if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_MUTE) == 0) {
279 snd_printdd("Skipping '%s %s Switch' (no mute on node 0x%x)\n", pfx, dirstr[dir], nid);
280 return 0;
281 }
278 sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]); 282 sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]);
279 return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); 283 return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
280} 284}
@@ -286,6 +290,10 @@ static int _add_volume(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
286 int type = dir ? HDA_INPUT : HDA_OUTPUT; 290 int type = dir ? HDA_INPUT : HDA_OUTPUT;
287 struct snd_kcontrol_new knew = 291 struct snd_kcontrol_new knew =
288 HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type); 292 HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type);
293 if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_NUM_STEPS) == 0) {
294 snd_printdd("Skipping '%s %s Volume' (no amp on node 0x%x)\n", pfx, dirstr[dir], nid);
295 return 0;
296 }
289 sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]); 297 sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]);
290 return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); 298 return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
291} 299}
@@ -464,50 +472,17 @@ exit:
464} 472}
465 473
466/* 474/*
467 * PCM stuffs 475 * PCM callbacks
468 */ 476 */
469static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid, 477static int ca0132_playback_pcm_open(struct hda_pcm_stream *hinfo,
470 u32 stream_tag, 478 struct hda_codec *codec,
471 int channel_id, int format) 479 struct snd_pcm_substream *substream)
472{ 480{
473 unsigned int oldval, newval; 481 struct ca0132_spec *spec = codec->spec;
474 482 return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream,
475 if (!nid) 483 hinfo);
476 return;
477
478 snd_printdd("ca0132_setup_stream: "
479 "NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n",
480 nid, stream_tag, channel_id, format);
481
482 /* update the format-id if changed */
483 oldval = snd_hda_codec_read(codec, nid, 0,
484 AC_VERB_GET_STREAM_FORMAT,
485 0);
486 if (oldval != format) {
487 msleep(20);
488 snd_hda_codec_write(codec, nid, 0,
489 AC_VERB_SET_STREAM_FORMAT,
490 format);
491 }
492
493 oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
494 newval = (stream_tag << 4) | channel_id;
495 if (oldval != newval) {
496 snd_hda_codec_write(codec, nid, 0,
497 AC_VERB_SET_CHANNEL_STREAMID,
498 newval);
499 }
500}
501
502static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid)
503{
504 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0);
505 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0);
506} 484}
507 485
508/*
509 * PCM callbacks
510 */
511static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo, 486static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
512 struct hda_codec *codec, 487 struct hda_codec *codec,
513 unsigned int stream_tag, 488 unsigned int stream_tag,
@@ -515,10 +490,8 @@ static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
515 struct snd_pcm_substream *substream) 490 struct snd_pcm_substream *substream)
516{ 491{
517 struct ca0132_spec *spec = codec->spec; 492 struct ca0132_spec *spec = codec->spec;
518 493 return snd_hda_multi_out_analog_prepare(codec, &spec->multiout,
519 ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format); 494 stream_tag, format, substream);
520
521 return 0;
522} 495}
523 496
524static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, 497static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
@@ -526,92 +499,45 @@ static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
526 struct snd_pcm_substream *substream) 499 struct snd_pcm_substream *substream)
527{ 500{
528 struct ca0132_spec *spec = codec->spec; 501 struct ca0132_spec *spec = codec->spec;
529 502 return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
530 ca0132_cleanup_stream(codec, spec->dacs[0]);
531
532 return 0;
533} 503}
534 504
535/* 505/*
536 * Digital out 506 * Digital out
537 */ 507 */
538static int ca0132_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo, 508static int ca0132_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
539 struct hda_codec *codec, 509 struct hda_codec *codec,
540 unsigned int stream_tag, 510 struct snd_pcm_substream *substream)
541 unsigned int format,
542 struct snd_pcm_substream *substream)
543{ 511{
544 struct ca0132_spec *spec = codec->spec; 512 struct ca0132_spec *spec = codec->spec;
545 513 return snd_hda_multi_out_dig_open(codec, &spec->multiout);
546 ca0132_setup_stream(codec, spec->dig_out, stream_tag, 0, format);
547
548 return 0;
549} 514}
550 515
551static int ca0132_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, 516static int ca0132_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
552 struct hda_codec *codec,
553 struct snd_pcm_substream *substream)
554{
555 struct ca0132_spec *spec = codec->spec;
556
557 ca0132_cleanup_stream(codec, spec->dig_out);
558
559 return 0;
560}
561
562/*
563 * Analog capture
564 */
565static int ca0132_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
566 struct hda_codec *codec, 517 struct hda_codec *codec,
567 unsigned int stream_tag, 518 unsigned int stream_tag,
568 unsigned int format, 519 unsigned int format,
569 struct snd_pcm_substream *substream) 520 struct snd_pcm_substream *substream)
570{ 521{
571 struct ca0132_spec *spec = codec->spec; 522 struct ca0132_spec *spec = codec->spec;
572 523 return snd_hda_multi_out_dig_prepare(codec, &spec->multiout,
573 ca0132_setup_stream(codec, spec->adcs[substream->number], 524 stream_tag, format, substream);
574 stream_tag, 0, format);
575
576 return 0;
577} 525}
578 526
579static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, 527static int ca0132_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
580 struct hda_codec *codec, 528 struct hda_codec *codec,
581 struct snd_pcm_substream *substream) 529 struct snd_pcm_substream *substream)
582{ 530{
583 struct ca0132_spec *spec = codec->spec; 531 struct ca0132_spec *spec = codec->spec;
584 532 return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
585 ca0132_cleanup_stream(codec, spec->adcs[substream->number]);
586
587 return 0;
588} 533}
589 534
590/* 535static int ca0132_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
591 * Digital capture 536 struct hda_codec *codec,
592 */ 537 struct snd_pcm_substream *substream)
593static int ca0132_dig_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
594 struct hda_codec *codec,
595 unsigned int stream_tag,
596 unsigned int format,
597 struct snd_pcm_substream *substream)
598{ 538{
599 struct ca0132_spec *spec = codec->spec; 539 struct ca0132_spec *spec = codec->spec;
600 540 return snd_hda_multi_out_dig_close(codec, &spec->multiout);
601 ca0132_setup_stream(codec, spec->dig_in, stream_tag, 0, format);
602
603 return 0;
604}
605
606static int ca0132_dig_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
607 struct hda_codec *codec,
608 struct snd_pcm_substream *substream)
609{
610 struct ca0132_spec *spec = codec->spec;
611
612 ca0132_cleanup_stream(codec, spec->dig_in);
613
614 return 0;
615} 541}
616 542
617/* 543/*
@@ -621,6 +547,7 @@ static struct hda_pcm_stream ca0132_pcm_analog_playback = {
621 .channels_min = 2, 547 .channels_min = 2,
622 .channels_max = 2, 548 .channels_max = 2,
623 .ops = { 549 .ops = {
550 .open = ca0132_playback_pcm_open,
624 .prepare = ca0132_playback_pcm_prepare, 551 .prepare = ca0132_playback_pcm_prepare,
625 .cleanup = ca0132_playback_pcm_cleanup 552 .cleanup = ca0132_playback_pcm_cleanup
626 }, 553 },
@@ -630,10 +557,6 @@ static struct hda_pcm_stream ca0132_pcm_analog_capture = {
630 .substreams = 1, 557 .substreams = 1,
631 .channels_min = 2, 558 .channels_min = 2,
632 .channels_max = 2, 559 .channels_max = 2,
633 .ops = {
634 .prepare = ca0132_capture_pcm_prepare,
635 .cleanup = ca0132_capture_pcm_cleanup
636 },
637}; 560};
638 561
639static struct hda_pcm_stream ca0132_pcm_digital_playback = { 562static struct hda_pcm_stream ca0132_pcm_digital_playback = {
@@ -641,6 +564,8 @@ static struct hda_pcm_stream ca0132_pcm_digital_playback = {
641 .channels_min = 2, 564 .channels_min = 2,
642 .channels_max = 2, 565 .channels_max = 2,
643 .ops = { 566 .ops = {
567 .open = ca0132_dig_playback_pcm_open,
568 .close = ca0132_dig_playback_pcm_close,
644 .prepare = ca0132_dig_playback_pcm_prepare, 569 .prepare = ca0132_dig_playback_pcm_prepare,
645 .cleanup = ca0132_dig_playback_pcm_cleanup 570 .cleanup = ca0132_dig_playback_pcm_cleanup
646 }, 571 },
@@ -650,10 +575,6 @@ static struct hda_pcm_stream ca0132_pcm_digital_capture = {
650 .substreams = 1, 575 .substreams = 1,
651 .channels_min = 2, 576 .channels_min = 2,
652 .channels_max = 2, 577 .channels_max = 2,
653 .ops = {
654 .prepare = ca0132_dig_capture_pcm_prepare,
655 .cleanup = ca0132_dig_capture_pcm_cleanup
656 },
657}; 578};
658 579
659static int ca0132_build_pcms(struct hda_codec *codec) 580static int ca0132_build_pcms(struct hda_codec *codec)
@@ -928,18 +849,16 @@ static int ca0132_build_controls(struct hda_codec *codec)
928 spec->dig_out); 849 spec->dig_out);
929 if (err < 0) 850 if (err < 0)
930 return err; 851 return err;
931 err = add_out_volume(codec, spec->dig_out, "IEC958"); 852 err = snd_hda_create_spdif_share_sw(codec, &spec->multiout);
932 if (err < 0) 853 if (err < 0)
933 return err; 854 return err;
855 /* spec->multiout.share_spdif = 1; */
934 } 856 }
935 857
936 if (spec->dig_in) { 858 if (spec->dig_in) {
937 err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in); 859 err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in);
938 if (err < 0) 860 if (err < 0)
939 return err; 861 return err;
940 err = add_in_volume(codec, spec->dig_in, "IEC958");
941 if (err < 0)
942 return err;
943 } 862 }
944 return 0; 863 return 0;
945} 864}
@@ -961,6 +880,9 @@ static void ca0132_config(struct hda_codec *codec)
961 struct ca0132_spec *spec = codec->spec; 880 struct ca0132_spec *spec = codec->spec;
962 struct auto_pin_cfg *cfg = &spec->autocfg; 881 struct auto_pin_cfg *cfg = &spec->autocfg;
963 882
883 codec->pcm_format_first = 1;
884 codec->no_sticky_stream = 1;
885
964 /* line-outs */ 886 /* line-outs */
965 cfg->line_outs = 1; 887 cfg->line_outs = 1;
966 cfg->line_out_pins[0] = 0x0b; /* front */ 888 cfg->line_out_pins[0] = 0x0b; /* front */
@@ -988,14 +910,24 @@ static void ca0132_config(struct hda_codec *codec)
988 910
989 /* Mic-in */ 911 /* Mic-in */
990 spec->input_pins[0] = 0x12; 912 spec->input_pins[0] = 0x12;
991 spec->input_labels[0] = "Mic-In"; 913 spec->input_labels[0] = "Mic";
992 spec->adcs[0] = 0x07; 914 spec->adcs[0] = 0x07;
993 915
994 /* Line-In */ 916 /* Line-In */
995 spec->input_pins[1] = 0x11; 917 spec->input_pins[1] = 0x11;
996 spec->input_labels[1] = "Line-In"; 918 spec->input_labels[1] = "Line";
997 spec->adcs[1] = 0x08; 919 spec->adcs[1] = 0x08;
998 spec->num_inputs = 2; 920 spec->num_inputs = 2;
921
922 /* SPDIF I/O */
923 spec->dig_out = 0x05;
924 spec->multiout.dig_out_nid = spec->dig_out;
925 cfg->dig_out_pins[0] = 0x0c;
926 cfg->dig_outs = 1;
927 cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
928 spec->dig_in = 0x09;
929 cfg->dig_in_pin = 0x0e;
930 cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
999} 931}
1000 932
1001static void ca0132_init_chip(struct hda_codec *codec) 933static void ca0132_init_chip(struct hda_codec *codec)
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 94040ccf8e8f..ea5775a1a7db 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4272,7 +4272,8 @@ static int stac92xx_init(struct hda_codec *codec)
4272 unsigned int gpio; 4272 unsigned int gpio;
4273 int i; 4273 int i;
4274 4274
4275 snd_hda_sequence_write(codec, spec->init); 4275 if (spec->init)
4276 snd_hda_sequence_write(codec, spec->init);
4276 4277
4277 /* power down adcs initially */ 4278 /* power down adcs initially */
4278 if (spec->powerdown_adcs) 4279 if (spec->powerdown_adcs)
@@ -5748,7 +5749,6 @@ again:
5748 /* fallthru */ 5749 /* fallthru */
5749 case 0x111d76b4: /* 6 Port without Analog Mixer */ 5750 case 0x111d76b4: /* 6 Port without Analog Mixer */
5750 case 0x111d76b5: 5751 case 0x111d76b5:
5751 spec->init = stac92hd71bxx_core_init;
5752 codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs; 5752 codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs;
5753 spec->num_dmics = stac92xx_connected_ports(codec, 5753 spec->num_dmics = stac92xx_connected_ports(codec,
5754 stac92hd71bxx_dmic_nids, 5754 stac92hd71bxx_dmic_nids,
@@ -5773,7 +5773,6 @@ again:
5773 spec->stream_delay = 40; /* 40 milliseconds */ 5773 spec->stream_delay = 40; /* 40 milliseconds */
5774 5774
5775 /* disable VSW */ 5775 /* disable VSW */
5776 spec->init = stac92hd71bxx_core_init;
5777 unmute_init++; 5776 unmute_init++;
5778 snd_hda_codec_set_pincfg(codec, 0x0f, 0x40f000f0); 5777 snd_hda_codec_set_pincfg(codec, 0x0f, 0x40f000f0);
5779 snd_hda_codec_set_pincfg(codec, 0x19, 0x40f000f3); 5778 snd_hda_codec_set_pincfg(codec, 0x19, 0x40f000f3);
@@ -5788,7 +5787,6 @@ again:
5788 5787
5789 /* fallthru */ 5788 /* fallthru */
5790 default: 5789 default:
5791 spec->init = stac92hd71bxx_core_init;
5792 codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs; 5790 codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs;
5793 spec->num_dmics = stac92xx_connected_ports(codec, 5791 spec->num_dmics = stac92xx_connected_ports(codec,
5794 stac92hd71bxx_dmic_nids, 5792 stac92hd71bxx_dmic_nids,
@@ -5796,6 +5794,9 @@ again:
5796 break; 5794 break;
5797 } 5795 }
5798 5796
5797 if (get_wcaps_type(get_wcaps(codec, 0x28)) == AC_WID_VOL_KNB)
5798 spec->init = stac92hd71bxx_core_init;
5799
5799 if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP) 5800 if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP)
5800 snd_hda_sequence_write_cache(codec, unmute_init); 5801 snd_hda_sequence_write_cache(codec, unmute_init);
5801 5802
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 80d90cb42853..430771776915 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -1752,6 +1752,14 @@ static int via_suspend(struct hda_codec *codec)
1752{ 1752{
1753 struct via_spec *spec = codec->spec; 1753 struct via_spec *spec = codec->spec;
1754 vt1708_stop_hp_work(spec); 1754 vt1708_stop_hp_work(spec);
1755
1756 if (spec->codec_type == VT1802) {
1757 /* Fix pop noise on headphones */
1758 int i;
1759 for (i = 0; i < spec->autocfg.hp_outs; i++)
1760 snd_hda_set_pin_ctl(codec, spec->autocfg.hp_pins[i], 0);
1761 }
1762
1755 return 0; 1763 return 0;
1756} 1764}
1757#endif 1765#endif
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index d1ab43706735..5579b08bb35b 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -851,6 +851,8 @@ static int __devinit lx_pcm_create(struct lx6464es *chip)
 	/* hardcoded device name & channel count */
 	err = snd_pcm_new(chip->card, (char *)card_name, 0,
 			  1, 1, &pcm);
+	if (err < 0)
+		return err;
 
 	pcm->private_data = chip;
 
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index b8ac8710f47f..b12308b5ba2a 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6585,7 +6585,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
 		snd_printk(KERN_ERR "HDSPM: "
 			   "unable to kmalloc Mixer memory of %d Bytes\n",
 			   (int)sizeof(struct hdspm_mixer));
-		return err;
+		return -ENOMEM;
 	}
 
 	hdspm->port_names_in = NULL;
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index 512434efcc31..805ab6e9a78f 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -1377,8 +1377,9 @@ static int __devinit sis_chip_create(struct snd_card *card,
 	if (rc)
 		goto error_out_cleanup;
 
-	if (request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME,
-			sis)) {
+	rc = request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME,
+			 sis);
+	if (rc) {
 		dev_err(&pci->dev, "unable to allocate irq %d\n", sis->irq);
 		goto error_out_cleanup;
 	}
diff --git a/sound/ppc/powermac.c b/sound/ppc/powermac.c
index f5ceb6f282de..210cafe04890 100644
--- a/sound/ppc/powermac.c
+++ b/sound/ppc/powermac.c
@@ -143,7 +143,7 @@ static int __devexit snd_pmac_remove(struct platform_device *devptr)
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int snd_pmac_driver_suspend(struct device *dev)
 {
 	struct snd_card *card = dev_get_drvdata(dev);
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 1aa52eff526a..9b18b5243a56 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -1040,6 +1040,7 @@ static int __devinit snd_ps3_driver_probe(struct ps3_system_bus_device *dev)
 			GFP_KERNEL);
 	if (!the_card.null_buffer_start_vaddr) {
 		pr_info("%s: nullbuffer alloc failed\n", __func__);
+		ret = -ENOMEM;
 		goto clean_preallocate;
 	}
 	pr_debug("%s: null vaddr=%p dma=%#llx\n", __func__,
diff --git a/sound/soc/blackfin/bf6xx-sport.c b/sound/soc/blackfin/bf6xx-sport.c
index 318c5ba5360f..dfb744381c42 100644
--- a/sound/soc/blackfin/bf6xx-sport.c
+++ b/sound/soc/blackfin/bf6xx-sport.c
@@ -413,7 +413,14 @@ EXPORT_SYMBOL(sport_create);
 
 void sport_delete(struct sport_device *sport)
 {
+	if (sport->tx_desc)
+		dma_free_coherent(NULL, sport->tx_desc_size,
+				sport->tx_desc, 0);
+	if (sport->rx_desc)
+		dma_free_coherent(NULL, sport->rx_desc_size,
+				sport->rx_desc, 0);
 	sport_free_resource(sport);
+	kfree(sport);
 }
 EXPORT_SYMBOL(sport_delete);
 
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 6537f16d383e..e33d327396ad 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -128,13 +128,9 @@ SOC_SINGLE_TLV("EQ4 B5 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B5_GAIN_SHIFT,
 
 ARIZONA_MIXER_CONTROLS("DRC1L", ARIZONA_DRC1LMIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("DRC1R", ARIZONA_DRC1RMIX_INPUT_1_SOURCE),
-ARIZONA_MIXER_CONTROLS("DRC2L", ARIZONA_DRC2LMIX_INPUT_1_SOURCE),
-ARIZONA_MIXER_CONTROLS("DRC2R", ARIZONA_DRC2RMIX_INPUT_1_SOURCE),
 
 SND_SOC_BYTES_MASK("DRC1", ARIZONA_DRC1_CTRL1, 5,
 		   ARIZONA_DRC1R_ENA | ARIZONA_DRC1L_ENA),
-SND_SOC_BYTES_MASK("DRC2", ARIZONA_DRC2_CTRL1, 5,
-		   ARIZONA_DRC2R_ENA | ARIZONA_DRC2L_ENA),
 
 ARIZONA_MIXER_CONTROLS("LHPF1", ARIZONA_HPLP1MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE),
@@ -236,8 +232,6 @@ ARIZONA_MIXER_ENUMS(EQ4, ARIZONA_EQ4MIX_INPUT_1_SOURCE);
 
 ARIZONA_MIXER_ENUMS(DRC1L, ARIZONA_DRC1LMIX_INPUT_1_SOURCE);
 ARIZONA_MIXER_ENUMS(DRC1R, ARIZONA_DRC1RMIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(DRC2L, ARIZONA_DRC2LMIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(DRC2R, ARIZONA_DRC2RMIX_INPUT_1_SOURCE);
 
 ARIZONA_MIXER_ENUMS(LHPF1, ARIZONA_HPLP1MIX_INPUT_1_SOURCE);
 ARIZONA_MIXER_ENUMS(LHPF2, ARIZONA_HPLP2MIX_INPUT_1_SOURCE);
@@ -349,10 +343,6 @@ SND_SOC_DAPM_PGA("DRC1L", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1L_ENA_SHIFT, 0,
 		 NULL, 0),
 SND_SOC_DAPM_PGA("DRC1R", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1R_ENA_SHIFT, 0,
 		 NULL, 0),
-SND_SOC_DAPM_PGA("DRC2L", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2L_ENA_SHIFT, 0,
-		 NULL, 0),
-SND_SOC_DAPM_PGA("DRC2R", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2R_ENA_SHIFT, 0,
-		 NULL, 0),
 
 SND_SOC_DAPM_PGA("LHPF1", ARIZONA_HPLPF1_1, ARIZONA_LHPF1_ENA_SHIFT, 0,
 		 NULL, 0),
@@ -466,8 +456,6 @@ ARIZONA_MIXER_WIDGETS(EQ4, "EQ4"),
 
 ARIZONA_MIXER_WIDGETS(DRC1L, "DRC1L"),
 ARIZONA_MIXER_WIDGETS(DRC1R, "DRC1R"),
-ARIZONA_MIXER_WIDGETS(DRC2L, "DRC2L"),
-ARIZONA_MIXER_WIDGETS(DRC2R, "DRC2R"),
 
 ARIZONA_MIXER_WIDGETS(LHPF1, "LHPF1"),
 ARIZONA_MIXER_WIDGETS(LHPF2, "LHPF2"),
@@ -553,8 +541,6 @@ SND_SOC_DAPM_OUTPUT("SPKDAT1R"),
 	{ name, "EQ4", "EQ4" }, \
 	{ name, "DRC1L", "DRC1L" }, \
 	{ name, "DRC1R", "DRC1R" }, \
-	{ name, "DRC2L", "DRC2L" }, \
-	{ name, "DRC2R", "DRC2R" }, \
 	{ name, "LHPF1", "LHPF1" }, \
 	{ name, "LHPF2", "LHPF2" }, \
 	{ name, "LHPF3", "LHPF3" }, \
@@ -639,6 +625,15 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
 	{ "AIF2 Capture", NULL, "SYSCLK" },
 	{ "AIF3 Capture", NULL, "SYSCLK" },
 
+	{ "IN1L PGA", NULL, "IN1L" },
+	{ "IN1R PGA", NULL, "IN1R" },
+
+	{ "IN2L PGA", NULL, "IN2L" },
+	{ "IN2R PGA", NULL, "IN2R" },
+
+	{ "IN3L PGA", NULL, "IN3L" },
+	{ "IN3R PGA", NULL, "IN3R" },
+
 	ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
 	ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
 	ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"),
@@ -675,8 +670,6 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
 
 	ARIZONA_MIXER_ROUTES("DRC1L", "DRC1L"),
 	ARIZONA_MIXER_ROUTES("DRC1R", "DRC1R"),
-	ARIZONA_MIXER_ROUTES("DRC2L", "DRC2L"),
-	ARIZONA_MIXER_ROUTES("DRC2R", "DRC2R"),
 
 	ARIZONA_MIXER_ROUTES("LHPF1", "LHPF1"),
 	ARIZONA_MIXER_ROUTES("LHPF2", "LHPF2"),
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 8033f7065189..01ebbcc5c6a4 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -681,6 +681,18 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
 	{ "AIF2 Capture", NULL, "SYSCLK" },
 	{ "AIF3 Capture", NULL, "SYSCLK" },
 
+	{ "IN1L PGA", NULL, "IN1L" },
+	{ "IN1R PGA", NULL, "IN1R" },
+
+	{ "IN2L PGA", NULL, "IN2L" },
+	{ "IN2R PGA", NULL, "IN2R" },
+
+	{ "IN3L PGA", NULL, "IN3L" },
+	{ "IN3R PGA", NULL, "IN3R" },
+
+	{ "IN4L PGA", NULL, "IN4L" },
+	{ "IN4R PGA", NULL, "IN4R" },
+
 	ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
 	ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
 	ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"),
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index aa9ce9dd7d8a..ce6720073798 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3733,21 +3733,6 @@ static int wm8962_runtime_resume(struct device *dev)
 
 	regcache_sync(wm8962->regmap);
 
-	regmap_update_bits(wm8962->regmap, WM8962_ANTI_POP,
-			   WM8962_STARTUP_BIAS_ENA | WM8962_VMID_BUF_ENA,
-			   WM8962_STARTUP_BIAS_ENA | WM8962_VMID_BUF_ENA);
-
-	/* Bias enable at 2*50k for ramp */
-	regmap_update_bits(wm8962->regmap, WM8962_PWR_MGMT_1,
-			   WM8962_VMID_SEL_MASK | WM8962_BIAS_ENA,
-			   WM8962_BIAS_ENA | 0x180);
-
-	msleep(5);
-
-	/* VMID back to 2x250k for standby */
-	regmap_update_bits(wm8962->regmap, WM8962_PWR_MGMT_1,
-			   WM8962_VMID_SEL_MASK, 0x100);
-
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 04ef03175c51..6c9eeca85b95 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -4038,6 +4038,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
 		break;
 	case WM8958:
 		if (wm8994->revision < 1) {
+			snd_soc_dapm_add_routes(dapm, wm8994_intercon,
+						ARRAY_SIZE(wm8994_intercon));
 			snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon,
 						ARRAY_SIZE(wm8994_revd_intercon));
 			snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon,
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index f16fb361a4eb..c6d2076a796b 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -148,7 +148,7 @@ SOC_SINGLE("Treble Volume", AC97_MASTER_TONE, 0, 15, 1),
 
 SOC_SINGLE("Capture ADC Switch", AC97_REC_GAIN, 15, 1, 1),
 SOC_ENUM("Capture Volume Steps", wm9712_enum[6]),
-SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 1),
+SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 0),
 SOC_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0),
 
 SOC_SINGLE_TLV("Mic 1 Volume", AC97_MIC, 8, 31, 1, main_tlv),
@@ -272,7 +272,7 @@ SOC_DAPM_ENUM("Route", wm9712_enum[9]);
 
 /* Mic select */
 static const struct snd_kcontrol_new wm9712_mic_src_controls =
-SOC_DAPM_ENUM("Route", wm9712_enum[7]);
+SOC_DAPM_ENUM("Mic Source Select", wm9712_enum[7]);
 
 /* diff select */
 static const struct snd_kcontrol_new wm9712_diff_sel_controls =
@@ -291,7 +291,9 @@ SND_SOC_DAPM_MUX("Left Capture Select", SND_SOC_NOPM, 0, 0,
 	&wm9712_capture_selectl_controls),
 SND_SOC_DAPM_MUX("Right Capture Select", SND_SOC_NOPM, 0, 0,
 	&wm9712_capture_selectr_controls),
-SND_SOC_DAPM_MUX("Mic Select Source", SND_SOC_NOPM, 0, 0,
+SND_SOC_DAPM_MUX("Left Mic Select Source", SND_SOC_NOPM, 0, 0,
+	&wm9712_mic_src_controls),
+SND_SOC_DAPM_MUX("Right Mic Select Source", SND_SOC_NOPM, 0, 0,
 	&wm9712_mic_src_controls),
 SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0,
 	&wm9712_diff_sel_controls),
@@ -319,6 +321,7 @@ SND_SOC_DAPM_PGA("Out 3 PGA", AC97_INT_PAGING, 5, 1, NULL, 0),
 SND_SOC_DAPM_PGA("Line PGA", AC97_INT_PAGING, 2, 1, NULL, 0),
 SND_SOC_DAPM_PGA("Phone PGA", AC97_INT_PAGING, 1, 1, NULL, 0),
 SND_SOC_DAPM_PGA("Mic PGA", AC97_INT_PAGING, 0, 1, NULL, 0),
+SND_SOC_DAPM_PGA("Differential Mic", SND_SOC_NOPM, 0, 0, NULL, 0),
 SND_SOC_DAPM_MICBIAS("Mic Bias", AC97_INT_PAGING, 10, 1),
 SND_SOC_DAPM_OUTPUT("MONOOUT"),
 SND_SOC_DAPM_OUTPUT("HPOUTL"),
@@ -379,6 +382,18 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = {
 	{"Mic PGA", NULL, "MIC1"},
 	{"Mic PGA", NULL, "MIC2"},
 
+	/* microphones */
+	{"Differential Mic", NULL, "MIC1"},
+	{"Differential Mic", NULL, "MIC2"},
+	{"Left Mic Select Source", "Mic 1", "MIC1"},
+	{"Left Mic Select Source", "Mic 2", "MIC2"},
+	{"Left Mic Select Source", "Stereo", "MIC1"},
+	{"Left Mic Select Source", "Differential", "Differential Mic"},
+	{"Right Mic Select Source", "Mic 1", "MIC1"},
+	{"Right Mic Select Source", "Mic 2", "MIC2"},
+	{"Right Mic Select Source", "Stereo", "MIC2"},
+	{"Right Mic Select Source", "Differential", "Differential Mic"},
+
 	/* left capture selector */
 	{"Left Capture Select", "Mic", "MIC1"},
 	{"Left Capture Select", "Speaker Mixer", "Speaker Mixer"},
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 95441bfc8190..ce5e5cd254dd 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -380,14 +380,20 @@ static void mcasp_start_tx(struct davinci_audio_dev *dev)
 static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream)
 {
 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		if (dev->txnumevt)	/* enable FIFO */
+		if (dev->txnumevt) {	/* enable FIFO */
+			mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
+						FIFO_ENABLE);
 			mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
 						FIFO_ENABLE);
+		}
 		mcasp_start_tx(dev);
 	} else {
-		if (dev->rxnumevt)	/* enable FIFO */
+		if (dev->rxnumevt) {	/* enable FIFO */
+			mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
+						FIFO_ENABLE);
 			mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
 						FIFO_ENABLE);
+		}
 		mcasp_start_rx(dev);
 	}
 }
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 28dd76c7cb1c..81d7728cf67f 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -380,13 +380,14 @@ static int imx_ssi_dai_probe(struct snd_soc_dai *dai)
 static struct snd_soc_dai_driver imx_ssi_dai = {
 	.probe = imx_ssi_dai_probe,
 	.playback = {
-		.channels_min = 1,
+		/* The SSI does not support monaural audio. */
+		.channels_min = 2,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_96000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE,
 	},
 	.capture = {
-		.channels_min = 1,
+		.channels_min = 2,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_96000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE,
diff --git a/sound/soc/mxs/Kconfig b/sound/soc/mxs/Kconfig
index 99a997f19bb9..b6fa77678d97 100644
--- a/sound/soc/mxs/Kconfig
+++ b/sound/soc/mxs/Kconfig
@@ -10,7 +10,7 @@ menuconfig SND_MXS_SOC
 if SND_MXS_SOC
 
 config SND_SOC_MXS_SGTL5000
-	tristate "SoC Audio support for i.MX boards with sgtl5000"
+	tristate "SoC Audio support for MXS boards with sgtl5000"
 	depends on I2C
 	select SND_SOC_SGTL5000
 	help
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index 34835e8a9160..d33c48baaf71 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -745,7 +745,7 @@ int omap_mcbsp_6pin_src_mux(struct omap_mcbsp *mcbsp, u8 mux)
 {
 	const char *signal, *src;
 
-	if (mcbsp->pdata->mux_signal)
+	if (!mcbsp->pdata->mux_signal)
 		return -EINVAL;
 
 	switch (mux) {
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index b7b2a1f91425..89b064650f14 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -20,7 +20,7 @@
 #include <sound/pcm_params.h>
 
 #include <plat/audio.h>
-#include <plat/dma.h>
+#include <mach/dma.h>
 
 #include "dma.h"
 #include "pcm.h"
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index f81c5976b961..c501af6d8dbe 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -826,7 +826,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
 	}
 
 	if (!rtd->cpu_dai) {
-		dev_dbg(card->dev, "CPU DAI %s not registered\n",
+		dev_err(card->dev, "CPU DAI %s not registered\n",
 			dai_link->cpu_dai_name);
 		return -EPROBE_DEFER;
 	}
@@ -857,14 +857,14 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
 		}
 
 		if (!rtd->codec_dai) {
-			dev_dbg(card->dev, "CODEC DAI %s not registered\n",
+			dev_err(card->dev, "CODEC DAI %s not registered\n",
 				dai_link->codec_dai_name);
 			return -EPROBE_DEFER;
 		}
 	}
 
 	if (!rtd->codec) {
-		dev_dbg(card->dev, "CODEC %s not registered\n",
+		dev_err(card->dev, "CODEC %s not registered\n",
 			dai_link->codec_name);
 		return -EPROBE_DEFER;
 	}
@@ -888,7 +888,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
 		rtd->platform = platform;
 	}
 	if (!rtd->platform) {
-		dev_dbg(card->dev, "platform %s not registered\n",
+		dev_err(card->dev, "platform %s not registered\n",
 			dai_link->platform_name);
 		return -EPROBE_DEFER;
 	}
@@ -1481,6 +1481,8 @@ static int soc_check_aux_dev(struct snd_soc_card *card, int num)
 			return 0;
 	}
 
+	dev_err(card->dev, "%s not registered\n", aux_dev->codec_name);
+
 	return -EPROBE_DEFER;
 }
 
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 7f8b3b7428bb..0c172938b82a 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -103,7 +103,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
 	}
 
 	/* Report before the DAPM sync to help users updating micbias status */
-	blocking_notifier_call_chain(&jack->notifier, status, jack);
+	blocking_notifier_call_chain(&jack->notifier, jack->status, jack);
 
 	snd_soc_dapm_sync(dapm);
 
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 0f647d22cb4a..c41181202688 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -821,10 +821,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
 	if (++ep->use_count != 1)
 		return 0;
 
-	/* just to be sure */
-	deactivate_urbs(ep, 0, 1);
-	wait_clear_urbs(ep);
-
 	ep->active_mask = 0;
 	ep->unlink_mask = 0;
 	ep->phase = 0;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index a1298f379428..62ec808ed792 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -544,6 +544,9 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
 	subs->last_frame_number = 0;
 	runtime->delay = 0;
 
+	/* clear the pending deactivation on the target EPs */
+	deactivate_endpoints(subs);
+
 	/* for playback, submit the URBs now; otherwise, the first hwptr_done
 	 * updates for all URBs would happen at the same time when starting */
 	if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 2884e67ee625..213362850abd 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,10 +10,12 @@ util/ctype.c
 util/evlist.c
 util/evsel.c
 util/cpumap.c
+util/hweight.c
 util/thread_map.c
 util/util.c
 util/xyarray.c
 util/cgroup.c
 util/debugfs.c
+util/rblist.c
 util/strlist.c
 ../../lib/rbtree.c
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 246852397e30..d617f69131d7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1976,9 +1976,10 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
 			if (copy_from_user(&csigset, sigmask_arg->sigset,
 					   sizeof csigset))
 				goto out;
-		}
-		sigset_from_compat(&sigset, &csigset);
-		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+			sigset_from_compat(&sigset, &csigset);
+			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+		} else
+			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
 		break;
 	}
 	default: